title: "TunedToLearn_AnticipatoryConvergence_Notebook"
output: html_notebook

Analysis notebook for “Tuned to Learn: An anticipatory hippocampal convergence state conducive to memory formation revealed during midbrain activation”

# Load required packages
# Packages used throughout the notebook: data wrangling/plotting (tidyverse,
# magrittr, cowplot, gghalves), mixed models (lme4, lmerTest, emmeans),
# mediation analysis (mediation) and effect sizes (sjstats).
pkgs <- c("tidyverse", "magrittr", "lme4", "lmerTest", "emmeans",
          "cowplot", "gghalves", "mediation", "sjstats")

# Attach every package; invisible() discards the list of attach results that
# was previously captured in the unused variable `inst`.
invisible(lapply(pkgs, library, character.only = TRUE))
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2     ✓ purrr   0.3.4
## ✓ tibble  3.0.1     ✓ dplyr   1.0.0
## ✓ tidyr   1.1.0     ✓ stringr 1.4.0
## ✓ readr   1.3.1     ✓ forcats 0.5.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
## 
## Attaching package: 'magrittr'
## The following object is masked from 'package:purrr':
## 
##     set_names
## The following object is masked from 'package:tidyr':
## 
##     extract
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## 
## Attaching package: 'lmerTest'
## The following object is masked from 'package:lme4':
## 
##     lmer
## The following object is masked from 'package:stats':
## 
##     step
## 
## ********************************************************
## Note: As of version 1.0.0, cowplot does not change the
##   default ggplot2 theme anymore. To recover the previous
##   behavior, execute:
##   theme_set(theme_cowplot())
## ********************************************************
## Loading required package: MASS
## 
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
## 
##     select
## Loading required package: mvtnorm
## Loading required package: sandwich
## mediation: Causal Mediation Analysis
## Version: 4.5.0

Initialization

# Plot Parameters

# Condition colours: high curiosity (orange) vs low curiosity (teal)
hcolor1 <- "#FF7C00"
lcolor1 <- "#02859E"

# Darker variants of the high/low curiosity colours
hcolor2 <- "#b14200"
lcolor2 <- "#007356"

# Additional colour pair (m*); used by later plots
mcolor1 <- "#120940"
mcolor2 <- "#8b83b5"

# Figure parameters ----
bwidth        <- 0.8   # bar width
sub_dotsizeB  <- 3     # subject scatter dot size, behavioral plots
sub_dotsize   <- 4.5   # subject scatter dot size
sub_dotsizeS  <- 3.5   # subject scatter dot size, split (faceted) plots
sub_dotalpha  <- 0.5
mean_dotsize  <- 6.5   # mean-point dot size
mean_dotsizeS <- 5     # mean-point dot size, split (faceted) plots
mean_dotalpha <- 0.7
erbar_thick   <- 0.5   # thickness of error bars
erbar_width   <- 0.1   # width of error-bar end caps
erbar_alpha   <- 1
line_alpha    <- 0.3   # alpha of lines connecting paired points
theme_bs      <- 21    # ggplot theme base size
plotbar_width <- 2.25

# Output folders for saved figures (uncomment to enable saving)
#figfldr <-"~/Dropbox/A_WorkingDocuments/ICE/ice-learningstate/Figures_Pub_pdf_v6/"
#revfigfldr <-"~/Dropbox/A_Projects_Manuscripts/Submitted/ICE/Manuscript/Revision_Figures/"
figsave <- 0  # set to 1 to write figures to disk

Data summary

Number of subjects: 23. Total trials across all subjects: 3582. After removing trials neither High nor Low Curiosity: 3307. After accounting for curiosity and removing trials that are NaN for memory: 3188.

Number of High Curiosity trials: 1599. Number of Low Curiosity trials: 1589.

Mean Number of High Curiosity Hits: 43.8 (Min = 25 (subj104), Max = 68). Mean Number of High Curiosity Misses: 25.74 (Min = 4 (subj119), Max = 44). Mean Number of Low Curiosity Hits: 27.3 (Min = 9 (subj114), Max = 50). Mean Number of Low Curiosity Misses: 41.8 (Min = 18 (subj102), Max = 61).

Lowest number of total valid trials: Subj102 - 82 trials (44 High)

Behavioral Data analysis (Group Average)

Data Prep

# Load trial-level source data; keep only trials with a valid curiosity
# category and a non-missing memory outcome.
full_dat <- read.csv("Data_csv/SourceData.csv")
full_dat <- drop_na(full_dat,c("curHighLow","memory"))
full_dat <- rename(full_dat,Motor=motor,
                   Memory=memory)
# First 12 columns hold the behavioral variables -- TODO confirm against the
# CSV header if the source file layout changes.
beh_raw <- full_dat[,1:12]
# Design/outcome columns become factors; Hits is the numeric (0/1) version of
# the Memory factor, used for trial averaging below.
sub_col <- c("subj","curHighLow","Motor","Memory")
beh_raw[sub_col] <- lapply(beh_raw[sub_col],factor)
beh_raw$Hits <- as.numeric(as.character(beh_raw$Memory))

# Per-subject averages within each Curiosity x Action(Motor) cell.
# `.groups = "drop_last"` reproduces the former default regrouping (result
# grouped by subj, curHighLow) while silencing the summarise() message.
beh_avg1 <- beh_raw %>%
  group_by(subj, curHighLow, Motor) %>%
  summarise(mem = mean(Hits),            # proportion of hits
            mem_count = sum(Hits),       # raw hit count
            knowing = mean(likelihoodZ), # mean likelihood-of-knowing (z)
            RT_mean = mean(motorRTs),
            RT_median = median(motorRTs),
            .groups = "drop_last")
## `summarise()` regrouping output by 'subj', 'curHighLow' (override with `.groups` argument)
# Per-subject averages per curiosity level, collapsed across Action.
# `.groups = "drop_last"` reproduces the former default regrouping (result
# grouped by subj) while silencing the summarise() message.
beh_avg2 <- beh_raw %>%
  group_by(subj, curHighLow) %>%
  summarise(mem = mean(Hits),            # proportion of hits
            mem_count = sum(Hits),       # raw hit count
            knowing = mean(likelihoodZ), # mean likelihood-of-knowing (z)
            RT_mean = mean(motorRTs),
            RT_median = median(motorRTs),
            .groups = "drop_last")
## `summarise()` regrouping output by 'subj' (override with `.groups` argument)

Analysis for memory performance with repeated measure ANOVA

# Repeated-measures ANOVA on per-subject mean recall: 2 (Curiosity) x 2
# (Action) within-subject design; Error(subj/...) defines the within-subject
# error strata.
mem_mdl1 <- aov(mem ~ curHighLow * Motor + Error(subj/(curHighLow * Motor)),data=beh_avg1)
summary(mem_mdl1)
## 
## Error: subj
##           Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 22  2.045 0.09294               
## 
## Error: subj:curHighLow
##            Df Sum Sq Mean Sq F value  Pr(>F)    
## curHighLow  1 1.2487  1.2487   86.15 4.6e-09 ***
## Residuals  22 0.3189  0.0145                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subj:Motor
##           Df Sum Sq Mean Sq F value  Pr(>F)   
## Motor      1 0.1608 0.16077   13.38 0.00138 **
## Residuals 22 0.2643 0.01201                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: subj:curHighLow:Motor
##                  Df  Sum Sq Mean Sq F value Pr(>F)  
## curHighLow:Motor  1 0.04204 0.04204   6.178  0.021 *
## Residuals        22 0.14971 0.00681                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Partial eta-squared effect sizes for the RM-ANOVA terms.
# NOTE(review): sjstats::eta_sq() is deprecated upstream in favor of
# effectsize::eta_squared(); kept here to match the published output.
# `TRUE` spelled out instead of the reassignable shorthand `T`.
eta_sq(mem_mdl1, partial = TRUE)
##                    term    partial.etasq Eta_Sq_partial
## 1       subj:curHighLow       curHighLow          0.797
## 2 subj:curHighLow:Motor curHighLow:Motor          0.219
## 3            subj:Motor            Motor          0.378
# Post-hoc Comparisons
# Estimated marginal means for every Curiosity x Motor cell (emmeans re-fits
# the aov model with sum-to-zero contrasts; see console note below).
em1 <-emmeans(mem_mdl1, ~curHighLow * Motor)
## Note: re-fitting model with sum-to-zero contrasts
em1
##  curHighLow Motor emmean    SE   df lower.CL upper.CL
##  0          0      0.333 0.037 38.8    0.258    0.408
##  1          0      0.609 0.037 38.8    0.534    0.684
##  0          1      0.459 0.037 38.8    0.384    0.534
##  1          1      0.649 0.037 38.8    0.575    0.724
## 
## Warning: EMMs are biased unless design is perfectly balanced 
## Confidence level used: 0.95
# Contrast weight vectors over the EMM grid printed above, whose rows are:
#   row 1: curHighLow=0, Motor=0  (Low curiosity, no action)
#   row 2: curHighLow=1, Motor=0  (High curiosity, no action)
#   row 3: curHighLow=0, Motor=1  (Low curiosity, action)
#   row 4: curHighLow=1, Motor=1  (High curiosity, action)
hc_m <- c(0,0,0,1)
hc_n <- c(0,1,0,0)
lc_m <- c(0,0,1,0)
lc_n <- c(1,0,0,0)

# Four planned comparisons, Bonferroni-corrected.
comp<- contrast(em1, method=list("HC Motor - NoMotor" = hc_m - hc_n,
                          "LC Motor - NoMotor" = lc_m - lc_n,
                          "HC - LC_Motor" = hc_m-lc_m,
                          "HC - LC_NoMotor" = hc_n - lc_n),adjust="bonferroni") 
comp
##  contrast           estimate     SE   df t.ratio p.value
##  HC Motor - NoMotor   0.0409 0.0286 40.9 1.428   0.6434 
##  LC Motor - NoMotor   0.1264 0.0286 40.9 4.417   0.0003 
##  HC - LC_Motor        0.1903 0.0304 38.9 6.252   <.0001 
##  HC - LC_NoMotor      0.2758 0.0304 38.9 9.062   <.0001 
## 
## P value adjustment: bonferroni method for 4 tests
# Bonferroni-adjusted confidence intervals for the same contrasts.
confint(comp)
##  contrast           estimate     SE   df lower.CL upper.CL
##  HC Motor - NoMotor   0.0409 0.0286 40.9  -0.0339    0.116
##  LC Motor - NoMotor   0.1264 0.0286 40.9   0.0516    0.201
##  HC - LC_Motor        0.1903 0.0304 38.9   0.1106    0.270
##  HC - LC_NoMotor      0.2758 0.0304 38.9   0.1961    0.355
## 
## Confidence level used: 0.95 
## Conf-level adjustment: bonferroni method for 4 estimates

There was a significant main effect of both curiosity and action contingency with a significant interaction. Follow-up paired comparisons showed that action contingency only had an effect on memory in the low curiosity condition, whereby memory for action-contingent trials was better than memory for non-action trials.

Results when collapsing across Action contingency.

# RM-ANOVA on recall with Curiosity as the only within-subject factor
# (collapsed across Action contingency).
mem_mdl2 <- aov(mem ~ curHighLow + Error(subj/curHighLow), data=beh_avg2)
summary(mem_mdl2)
## 
## Error: subj
##           Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 22  1.031 0.04684               
## 
## Error: subj:curHighLow
##            Df Sum Sq Mean Sq F value  Pr(>F)    
## curHighLow  1 0.6201  0.6201   86.81 4.3e-09 ***
## Residuals  22 0.1572  0.0071                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Partial eta-squared for the curiosity-only RM-ANOVA.
# NOTE(review): sjstats::eta_sq() is deprecated upstream in favor of
# effectsize::eta_squared(); kept here to match the published output.
# `TRUE` spelled out instead of the reassignable shorthand `T`.
eta_sq(mem_mdl2, partial = TRUE)
##              term partial.etasq Eta_Sq_partial
## 1 subj:curHighLow    curHighLow          0.798
# Paired t-test: per-subject mean recall, High vs Low curiosity.
# NOTE(review): pairing relies on hcur_avg and lcur_avg having rows in the
# same subject order; both are subsets of beh_avg2, so order should match --
# verify if the upstream data ever change.
hcur_avg <- subset(beh_avg2,curHighLow==1)
lcur_avg <- subset(beh_avg2,curHighLow==0)

hit_pairedt <- t.test(hcur_avg$mem,lcur_avg$mem,paired=T)
hit_pairedt
## 
##  Paired t-test
## 
## data:  hcur_avg$mem and lcur_avg$mem
## t = 9.317, df = 22, p-value = 4.303e-09
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.1805227 0.2838977
## sample estimates:
## mean of the differences 
##               0.2322102
# Summary Statistics
# Mean, SD and SEM of recall per curiosity level (n() = number of subjects).
mem_summary_stats = beh_avg2 %>% group_by(curHighLow) %>% summarise(mem_mean = mean(mem), 
                                                             mem_sd = sd(mem),
                                                             mem_sem = sd(mem)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
mem_summary_stats
## # A tibble: 2 x 4
##   curHighLow mem_mean mem_sd mem_sem
##   <fct>         <dbl>  <dbl>   <dbl>
## 1 0             0.397  0.160  0.0334
## 2 1             0.629  0.168  0.0351
# Effect size: Cohen's d for the paired High-vs-Low curiosity difference in
# recall -- the mean within-subject difference scaled by its own SD.
paired_diff <- hcur_avg$mem - lcur_avg$mem
diff_mean <- mean(paired_diff)
diff_sd <- sd(paired_diff)
cohend <- diff_mean / diff_sd
cohend
## [1] 1.942737
## Plot memory performance based on curiosity 
## Memory plot with scatter and box
# Raincloud-style plot: per-subject points and connecting lines, half
# boxplots and half violins for Low ("0", teal) vs High ("1", orange).
# NOTE(review): `colors` is defined but the geoms hardcode l/hcolor1 directly.
colors <-c("0"=lcolor1,"1"=hcolor1)

  
ggplot(data=beh_avg2, aes(y=mem)) +
  # subject-level scatter, one layer per curiosity level
  geom_point(data=beh_avg2 %>% filter(curHighLow=="0"), aes(x=curHighLow), color=lcolor1, size= sub_dotsizeB, alpha=sub_dotalpha) +
  geom_point(data=beh_avg2 %>% filter(curHighLow=="1"), aes(x=curHighLow), color=hcolor1, size= sub_dotsizeB, alpha=sub_dotalpha) +
  # gray lines connect each subject's pair of points
  geom_line(aes(x = curHighLow, group = subj), color = 'lightgray', alpha = .5) +
  geom_half_boxplot(data = beh_avg2 %>% filter(curHighLow=="0"), aes(x=curHighLow, y = mem), position = position_nudge(x = -.25),side = "r",outlier.shape = NA, center = TRUE, errorbar.draw = TRUE, width = .25, fill = lcolor1, alpha = .5) +
  geom_half_boxplot(data = beh_avg2 %>% filter(curHighLow=="1"), aes(x=curHighLow, y = mem), position = position_nudge(x = .15),side = "r",outlier.shape = NA, center = TRUE, errorbar.draw = TRUE, width = .25, fill = hcolor1, alpha = .5) +
  geom_half_violin(data = beh_avg2 %>% filter(curHighLow=="0"),aes(x = curHighLow, y = mem), position = position_nudge(x = -0.3),side = "l", fill = lcolor1, alpha = .5, trim = F) +
  geom_half_violin(data = beh_avg2 %>% filter(curHighLow=="1"),aes(x = curHighLow, y = mem), position = position_nudge(x = 0.3),side = "r", fill = hcolor1, alpha = .5, trim = F) +
  labs(y="Memory Recall", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.6,.6)) + 
  coord_cartesian(ylim=c(0,1)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # figfldr is only defined when the output path is configured (its
  # assignment is commented out in the parameter section); fail with a clear
  # message instead of an opaque "object not found" from paste.
  if (!exists("figfldr")) stop("figsave == 1 but 'figfldr' is not defined", call. = FALSE)
  fname <- paste0(figfldr, "Fig_MemoryPerformance_Cur.pdf")
  ggsave(fname)
}
## Plot memory performance based on curiosity and action
## Memory plot with scatter and box
# Same raincloud layout as the curiosity-only plot, faceted by Action
# contingency (Motor: 0 = No Action, 1 = Action).
colors <-c("0"=lcolor1,"1"=hcolor1)
labels <- c("0"="No Action","1"="Action")
  
ggplot(data=beh_avg1, aes(y=mem)) +
  geom_point(data=beh_avg1 %>% filter(curHighLow=="0"), aes(x=curHighLow), color=lcolor1, size= sub_dotsizeB, alpha=sub_dotalpha) +
  geom_point(data=beh_avg1 %>% filter(curHighLow=="1"), aes(x=curHighLow), color=hcolor1, size= sub_dotsizeB, alpha=sub_dotalpha) +
  geom_line(aes(x = curHighLow, group = subj), color = 'lightgray', alpha = .5) +
  geom_half_boxplot(data = beh_avg1 %>% filter(curHighLow=="0"), aes(x=curHighLow, y = mem), position = position_nudge(x = -.25),side = "r",outlier.shape = NA, center = TRUE, errorbar.draw = TRUE, width = .25, fill = lcolor1, alpha = .5) +
  geom_half_boxplot(data = beh_avg1 %>% filter(curHighLow=="1"), aes(x=curHighLow, y = mem), position = position_nudge(x = .15),side = "r",outlier.shape = NA, center = TRUE, errorbar.draw = TRUE, width = .25, fill = hcolor1, alpha = .5) +
  geom_half_violin(data = beh_avg1 %>% filter(curHighLow=="0"),aes(x = curHighLow, y = mem), position = position_nudge(x = -0.3),side = "l", fill = lcolor1, alpha = .5, trim = F) +
  geom_half_violin(data = beh_avg1 %>% filter(curHighLow=="1"),aes(x = curHighLow, y = mem), position = position_nudge(x = 0.3),side = "r", fill = hcolor1, alpha = .5, trim = F) +
  labs(y="Memory Recall", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.6,.6)) + 
  coord_cartesian(ylim=c(0,1)) + 
  facet_wrap(~Motor,labeller = labeller(Motor=labels)) +
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # figfldr is only defined when the output path is configured (its
  # assignment is commented out in the parameter section); fail with a clear
  # message instead of an opaque "object not found" from paste.
  if (!exists("figfldr")) stop("figsave == 1 but 'figfldr' is not defined", call. = FALSE)
  fname <- paste0(figfldr, "Fig_MemoryPerformance_CurMot.pdf")
  ggsave(fname)
}

To account for the influence of prior knowledge we also ran a mixed model analysis which included self-reported likelihood of knowing as a covariate.

# Mixed-effects logistic regression: trial-level memory (hit/miss) predicted
# by binarised curiosity, with random intercepts per subject.
mem_lm1 <- glmer(Memory ~ curHighLow + (1|subj), data=beh_raw, family = binomial)
summary(mem_lm1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curHighLow + (1 | subj)
##    Data: beh_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4010.4   4028.6  -2002.2   4004.4     3185 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6941 -0.8732  0.3712  0.8149  2.1942 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4415   0.6645  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  -0.4671     0.1488  -3.138   0.0017 ** 
## curHighLow1   1.0566     0.0773  13.668   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.257
# Same GLMM with self-reported likelihood of knowing (z-scored) added as a
# covariate to control for prior knowledge.
mem_lm2 <- glmer(Memory ~ curHighLow + likelihoodZ + (1|subj), data=beh_raw, family = binomial)
summary(mem_lm2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curHighLow + likelihoodZ + (1 | subj)
##    Data: beh_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3887.2   3911.5  -1939.6   3879.2     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6289 -0.8122  0.3167  0.8104  2.4819 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.5156   0.718   
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept) -0.19417    0.16152  -1.202    0.229    
## curHighLow1  0.74334    0.08291   8.965   <2e-16 ***
## likelihoodZ  0.55022    0.05113  10.762   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1
## curHighLow1 -0.275       
## likelihoodZ  0.156 -0.306

When controlling for Likelihood of knowing, curiosity remained a significant predictor of subsequent memory.

We also ran the same model with the z-scored curiosity rating instead of the binarised category of high vs low curiosity.

# GLMM using the continuous z-scored curiosity rating instead of the
# binarised high/low category.
mem_lm3 <- glmer(Memory ~ curiosityZ + (1|subj), data=beh_raw, family = binomial)
summary(mem_lm3)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curiosityZ + (1 | subj)
##    Data: beh_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3993.3   4011.5  -1993.6   3987.3     3185 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8720 -0.8376  0.3658  0.8302  2.6742 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4649   0.6818  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.08304    0.14735   0.564    0.573    
## curiosityZ   0.47603    0.03368  14.135   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##            (Intr)
## curiosityZ 0.012
# Continuous curiosity model with likelihood-of-knowing covariate.
mem_lm4 <- glmer(Memory ~ curiosityZ + likelihoodZ + (1|subj), data=beh_raw, family = binomial)
summary(mem_lm4)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curiosityZ + likelihoodZ + (1 | subj)
##    Data: beh_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3878.2   3902.4  -1935.1   3870.2     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8791 -0.8058  0.3226  0.7997  2.6582 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.5273   0.7262  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.18925    0.15694   1.206    0.228    
## curiosityZ   0.33832    0.03603   9.390   <2e-16 ***
## likelihoodZ  0.53605    0.05141  10.427   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crstyZ
## curiosityZ  -0.011       
## likelihoodZ  0.076 -0.319

Results remained similar when using z-scored curiosity ratings instead of the binarised levels. This should also be expected, given that the ratings for High and Low curiosity should already be highly separated due to the tertile split used during question selection.

For trials with motor demand we also additionally examined if RT is related to subsequent memory.

# Restrict to action-demand trials and z-score motor RT within each subject.
beh_motor <- subset(beh_raw,Motor==1)
# ave() applies scale() to motorRTs separately per subject; assumes each
# subject has multiple trials with non-constant RTs -- TODO confirm, else
# this would yield NaN for that subject.
beh_motor$motorRTsZ <- ave(beh_motor$motorRTs,beh_motor$subj,FUN=scale)

# Baseline GLMM on action trials (no RT term), for model comparison below.
mem_lm5 <- glmer(Memory ~ curiosityZ + likelihoodZ + (1|subj), data=beh_motor, family = binomial)
summary(mem_lm5)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curiosityZ + likelihoodZ + (1 | subj)
##    Data: beh_motor
## 
##      AIC      BIC   logLik deviance df.resid 
##   1939.4   1960.8   -965.7   1931.4     1571 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2636 -0.8544  0.3895  0.7942  2.1856 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.6203   0.7876  
## Number of obs: 1575, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.36398    0.17443   2.087   0.0369 *  
## curiosityZ   0.26590    0.05313   5.005 5.59e-07 ***
## likelihoodZ  0.49112    0.07386   6.649 2.95e-11 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crstyZ
## curiosityZ  -0.005       
## likelihoodZ  0.090 -0.353
# Add within-subject z-scored RT as a predictor of subsequent memory.
mem_lm6 <- glmer(Memory ~ curiosityZ + likelihoodZ + motorRTsZ + (1|subj), data=beh_motor, family = binomial)
summary(mem_lm6)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ curiosityZ + likelihoodZ + motorRTsZ + (1 | subj)
##    Data: beh_motor
## 
##      AIC      BIC   logLik deviance df.resid 
##   1940.8   1967.6   -965.4   1930.8     1570 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3491 -0.8525  0.3910  0.7933  2.2038 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.6192   0.7869  
## Number of obs: 1575, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.36324    0.17429   2.084   0.0371 *  
## curiosityZ   0.26337    0.05323   4.948 7.49e-07 ***
## likelihoodZ  0.48806    0.07400   6.596 4.23e-11 ***
## motorRTsZ   -0.04230    0.05650  -0.749   0.4540    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crstyZ lklhdZ
## curiosityZ  -0.005              
## likelihoodZ  0.090 -0.350       
## motorRTsZ    0.005  0.060  0.052
# Likelihood-ratio test of the nested GLMMs: does adding motorRTsZ improve
# fit over the baseline model? (Output below: it does not.)
anova(mem_lm5,mem_lm6)
## Data: beh_motor
## Models:
## mem_lm5: Memory ~ curiosityZ + likelihoodZ + (1 | subj)
## mem_lm6: Memory ~ curiosityZ + likelihoodZ + motorRTsZ + (1 | subj)
##         npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## mem_lm5    4 1939.4 1960.8 -965.68   1931.4                     
## mem_lm6    5 1940.8 1967.6 -965.40   1930.8 0.5595  1     0.4544

When RT for the action demand trials was included as a predictor, RT was not a significant predictor. This suggests that the effect of curiosity on memory was unlikely to be purely related to changes in vigilance/attention.

fMRI Analysis

Data Prep

# Copy the full trial-level data for the fMRI analyses and re-factor the
# categorical columns (sub_col was defined in the behavioral Data Prep).
dist_raw <- full_dat
dist_raw[sub_col] <- lapply(dist_raw[sub_col],factor)

Anticipatory Univariate activation

We first looked at the effect of curiosity on anticipatory activation in the MTL and also in the mesolimbic circuit.

A trial-by-trial linear mixed model approach was used. Curiosity and Action contingency were modeled as fixed effects and subject was included as random intercepts.

VTA

# Mean anticipatory activation in the VTA ROI (SN voxels excluded).
wroi <- "Q_VTA_50_noSN_bin_mean_act"
tmp_act <- dist_raw[c("subj", "curHighLow", "Motor", wroi)]
colnames(tmp_act)[4] <- "Act"
tmp_act <- na.omit(tmp_act)

# Curiosity X Motor
# Trial-level linear mixed model: Curiosity x Action fixed effects, random
# subject intercepts. Fit by ML; REML = FALSE spelled out instead of the
# reassignable shorthand `F`.
q.vta.act_cm <- lmer(Act ~ curHighLow * Motor + (1 | subj), data = tmp_act, REML = FALSE)
summary(q.vta.act_cm)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow * Motor + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   8433.4   8469.8  -4210.7   8421.4     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5306 -0.6212 -0.0142  0.6374  3.8848 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.02337  0.1529  
##  Residual             0.81234  0.9013  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                      Estimate Std. Error         df t value Pr(>|t|)  
## (Intercept)         7.788e-02  4.502e-02  5.759e+01   1.730   0.0890 .
## curHighLow1         9.972e-02  4.490e-02  3.166e+03   2.221   0.0264 *
## Motor1              3.840e-02  4.524e-02  3.166e+03   0.849   0.3960  
## curHighLow1:Motor1 -8.195e-03  6.388e-02  3.166e+03  -0.128   0.8979  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1 Motor1
## curHighLow1 -0.499              
## Motor1      -0.495  0.496       
## crHghLw1:M1  0.351 -0.703 -0.708
# Pairwise EMM comparison of curiosity levels, averaged over Motor;
# pbkrtest.limit is raised so Kenward-Roger df are computed for all 3188 obs.
emmeans(q.vta.act_cm, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## NOTE: Results may be misleading due to involvement in interactions
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0          0.0971 0.0399 34.7  0.00392    0.190
##  1          0.1927 0.0398 34.5  0.09964    0.286
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1     -0.0956 0.0319 3168 -2.993  0.0028 
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
# Per-subject cell means (tmp_act2), then condition means + SEM (tmp) for the
# large mean markers. NOTE(review): summarise_all() is superseded by
# across(); kept to match the captured output.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow,Motor) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow,Motor) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` regrouping output by 'curHighLow' (override with `.groups` argument)
labels <- c("0"="No Action","1"="Action")

# Subject-level points with connecting lines, plus nudged mean dots and SEM
# error bars, faceted by Action contingency.
# NOTE(review): points use as.numeric(curHighLow) on a continuous x while the
# mean dots use the factor with scale_x_discrete -- confirm both land on the
# intended positions if the plot is regenerated.
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory VTA BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  facet_wrap(~Motor,labeller = labeller(Motor=labels)) +
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # figfldr is only defined when the output path is configured (its
  # assignment is commented out in the parameter section); fail with a clear
  # message instead of an opaque "object not found" from paste.
  if (!exists("figfldr")) stop("figsave == 1 but 'figfldr' is not defined", call. = FALSE)
  fname <- paste0(figfldr, "fMRI_Uni_CM_qVTA.pdf")
  ggsave(fname)
}

In the VTA, there is a significant main effect of curiosity, with no main effect of action and no interaction.

HPC

# Mean anticipatory activation in the hippocampus (HPC) ROI.
wroi <- "Q_HPC_mean_act"
tmp_act <- dist_raw[c("subj", "curHighLow", "Motor", wroi)]
colnames(tmp_act)[4] <- "Act"
tmp_act <- na.omit(tmp_act)

# Curiosity X Motor
# Trial-level linear mixed model: Curiosity x Action fixed effects, random
# subject intercepts. Fit by ML; REML = FALSE spelled out instead of the
# reassignable shorthand `F`.
q.hpc.act_cm <- lmer(Act ~ curHighLow * Motor + (1 | subj), data = tmp_act, REML = FALSE)
summary(q.hpc.act_cm)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow * Motor + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   7774.9   7811.3  -3881.4   7762.9     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.1287 -0.6297  0.0102  0.6145  4.2123 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.02205  0.1485  
##  Residual             0.66015  0.8125  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                      Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)           0.20278    0.04219   53.64646   4.806 1.28e-05 ***
## curHighLow1           0.01965    0.04047 3166.34624   0.485    0.627    
## Motor1                0.05045    0.04078 3166.13474   1.237    0.216    
## curHighLow1:Motor1   -0.01259    0.05759 3166.50761  -0.219    0.827    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1 Motor1
## curHighLow1 -0.480              
## Motor1      -0.476  0.496       
## crHghLw1:M1  0.338 -0.703 -0.708
## Plot
# Dot as mean
# Per-subject cell means (tmp_act2), then condition means + SEM (tmp) for the
# large mean markers. NOTE(review): summarise_all() is superseded by
# across(); kept to match the captured output.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow,Motor) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow,Motor) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` regrouping output by 'curHighLow' (override with `.groups` argument)
labels <- c("0"="No Action","1"="Action")

# Same layered layout as the VTA plot, for anticipatory HPC BOLD.
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory HPC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  facet_wrap(~Motor,labeller = labeller(Motor=labels)) +
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_CM_qHPC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  # figfldr is only defined when the output path is configured (its
  # assignment is commented out in the parameter section); fail with a clear
  # message instead of an opaque "object not found" from paste.
  if (!exists("figfldr")) stop("figsave == 1 but 'figfldr' is not defined", call. = FALSE)
  fname <- paste0(figfldr, "fMRI_Uni_CM_qHPC.pdf")
  ggsave(fname)
}

No effect of curiosity or action in the HPC.

MTL-PHC

# Mean anticipatory activation in the parahippocampal cortex (PHC) ROI.
wroi <- "Q_PHC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj", "curHighLow", "Motor", wroi)]
colnames(tmp_act)[4] <- "Act"
tmp_act <- na.omit(tmp_act)

# Curiosity X Motor
# Trial-level linear mixed model: Curiosity x Action fixed effects, random
# subject intercepts. Fit by ML; REML = FALSE spelled out instead of the
# reassignable shorthand `F`.
q.phc.act_cm <- lmer(Act ~ curHighLow * Motor + (1 | subj), data = tmp_act, REML = FALSE)
summary(q.phc.act_cm)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow * Motor + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   8002.1   8038.5  -3995.0   7990.1     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8233 -0.6072 -0.0077  0.6419  4.6010 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.04315  0.2077  
##  Residual             0.70626  0.8404  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                      Estimate Std. Error         df t value Pr(>|t|)   
## (Intercept)           0.15324    0.05249   39.74207   2.919  0.00575 **
## curHighLow1           0.08006    0.04186 3165.88288   1.912  0.05592 . 
## Motor1                0.04848    0.04218 3165.72212   1.149  0.25057   
## curHighLow1:Motor1   -0.04049    0.05957 3165.98156  -0.680  0.49668   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1 Motor1
## curHighLow1 -0.399              
## Motor1      -0.396  0.496       
## crHghLw1:M1  0.281 -0.703 -0.708
# Pairwise EMM comparison of curiosity levels, averaged over Motor;
# pbkrtest.limit is raised so Kenward-Roger df are computed for all 3188 obs.
emmeans(q.phc.act_cm, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## NOTE: Results may be misleading due to involvement in interactions
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.177 0.0492 29.4   0.0616    0.293
##  1           0.237 0.0491 29.3   0.1215    0.353
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1     -0.0598 0.0298 3168 -2.008  0.0447 
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
# Per-subject cell means: one value per subject x curiosity x action cell.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow,Motor) %>% summarise_all(mean)
# Group-level mean and SEM per cell (computed over subject means), used for
# the summary dots and error bars below.
tmp = tmp_act2 %>% group_by(curHighLow,Motor) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` regrouping output by 'curHighLow' (override with `.groups` argument)
labels <- c("0"="No Action","1"="Action")

# Per-subject dots connected within subject, group-mean dots nudged toward
# the center, +/- 1 SEM error bars; faceted by action condition.
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory PHC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  facet_wrap(~Motor,labeller = labeller(Motor=labels)) +
  theme_classic(base_size=theme_bs)

# Export the curiosity-by-action PHC figure as a PDF; an EPS export path
# (via cairo_ps) is kept below as a disabled alternative.
if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_CM_qPHC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <- paste0(figfldr, "fMRI_Uni_CM_qPHC.pdf")
  ggsave(fname)
}

In the PHC there was a marginal effect of curiosity (p=.055) with no effect of action or interaction.

MTL-PRC

# Anticipatory PRC activation: same extraction as the PHC section, with the
# PRC mask; the ROI column is renamed to the generic "Act".
wroi <- "Q_PRC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow","Motor",wroi)] 
colnames(tmp_act)[4]<-"Act"
tmp_act <- na.omit(tmp_act)  # drop trials with missing activation or predictors


# Curiosity X Motor
# Mixed model: curiosity x action fixed effects, random intercept per
# subject; ML fit (REML=F) to match the other ROI models.
q.prc.act_cm <-lmer(Act ~ curHighLow*Motor + (1|subj), data=tmp_act, REML=F)
summary(q.prc.act_cm)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow * Motor + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   6963.2   6999.6  -3475.6   6951.2     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.1430 -0.6161 -0.0245  0.5877  6.3786 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.01489  0.1220  
##  Residual             0.51218  0.7157  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                      Estimate Std. Error         df t value Pr(>|t|)   
## (Intercept)           0.11551    0.03584   57.75795   3.223  0.00209 **
## curHighLow1           0.08538    0.03565 3166.33731   2.395  0.01667 * 
## Motor1                0.05522    0.03592 3166.11687   1.537  0.12434   
## curHighLow1:Motor1   -0.04876    0.05072 3166.51756  -0.961  0.33650   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1 Motor1
## curHighLow1 -0.498              
## Motor1      -0.494  0.496       
## crHghLw1:M1  0.350 -0.703 -0.708
emmeans(q.prc.act_cm, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## NOTE: Results may be misleading due to involvement in interactions
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.143 0.0318 34.6   0.0689    0.217
##  1           0.204 0.0317 34.4   0.1300    0.278
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1      -0.061 0.0254 3168 -2.405  0.0162 
## 
## Results are averaged over the levels of: Motor 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow,Motor) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow,Motor) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` regrouping output by 'curHighLow' (override with `.groups` argument)
labels <- c("0"="No Action","1"="Action")

ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory PRC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  facet_wrap(~Motor,labeller = labeller(Motor=labels)) +
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_CM_qPRC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_CM_qPRC.pdf",sep="")
  ggsave(fname)
}

In the PRC, there was a significant main effect of curiosity with no effect of action or interaction.

As there was no effect of action or interaction, we collapsed across all action-contingency to increase power for detection of curiosity-related effects which are of primary interest.

—————-CURIOSITY ONLY MODEL ———————– ### VTA

# Curiosity-only models (action collapsed; see rationale above).
# Anticipatory VTA activation, renamed to the generic "Act" column.
wroi <- "Q_VTA_50_noSN_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)  # drop trials with missing values

# Curiosity only
# Fixed effect of curiosity, random intercept per subject, ML fit (REML=F).
q.vta.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(q.vta.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   8430.6   8454.9  -4211.3   8422.6     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5086 -0.6261 -0.0126  0.6344  3.9055 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.02336  0.1528  
##  Residual             0.81264  0.9015  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)   
## (Intercept) 9.678e-02  3.911e-02 3.293e+01   2.474  0.01867 * 
## curHighLow1 9.580e-02  3.193e-02 3.165e+03   3.000  0.00272 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.410
emmeans(q.vta.act_c, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0          0.0968 0.0399 34.7  0.00363    0.190
##  1          0.1926 0.0398 34.5  0.09953    0.286
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1     -0.0958 0.0319 3166 -2.999  0.0027 
## 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory VTA BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_qVTA.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_qVTA.pdf",sep="")
  ggsave(fname)
}

HPC

# Anticipatory HPC activation, renamed to the generic "Act" column.
wroi <- "Q_HPC_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)  # drop trials with missing values

# Curiosity only
# Fixed effect of curiosity, random intercept per subject, ML fit (REML=F).
q.hpc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(q.hpc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   7773.3   7797.6  -3882.6   7765.3     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.1581 -0.6361  0.0032  0.6183  4.2439 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.02207  0.1486  
##  Residual             0.66065  0.8128  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 2.276e-01  3.712e-02 3.215e+01   6.132 7.28e-07 ***
## curHighLow1 1.360e-02  2.879e-02 3.165e+03   0.472    0.637    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.389
## Plot
# Dot as mean
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory HPC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_qHPC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_qHPC.pdf",sep="")
  ggsave(fname)
}

MTL-PHC

# Anticipatory PHC activation, renamed to the generic "Act" column.
wroi <- "Q_PHC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)  # drop trials with missing values

# Curiosity only
# Fixed effect of curiosity, random intercept per subject, ML fit (REML=F).
q.phc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(q.phc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   7999.4   8023.7  -3995.7   7991.4     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8512 -0.6099 -0.0117  0.6390  4.6304 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.04319  0.2078  
##  Residual             0.70656  0.8406  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 1.771e-01  4.822e-02 2.828e+01   3.673 0.000992 ***
## curHighLow1 6.016e-02  2.978e-02 3.165e+03   2.020 0.043425 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.310
emmeans(q.phc.act_c, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.177 0.0492 29.4   0.0612    0.293
##  1           0.237 0.0492 29.3   0.1214    0.353
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1     -0.0602 0.0298 3166 -2.020  0.0435 
## 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory PHC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_qPHC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_qPHC.pdf",sep="")
  ggsave(fname)
}

MTL-PRC

# BUG FIX: this is the PRC section, but the ROI string previously pointed at
# the PHC mask ("Q_PHC_mask_MNI152_bin_mean_act"), so q.prc.act_c was fit on
# PHC data — the cached output below duplicates the PHC results exactly
# (same AIC 7999.4 and identical coefficients). Use the PRC mask, matching
# the Curiosity X Motor PRC model above.
# NOTE(review): re-run this chunk to regenerate the printed output.
wroi <- "Q_PRC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)  # drop trials with missing values

# Curiosity only
# Fixed effect of curiosity, random intercept per subject, ML fit (REML=F).
q.prc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(q.prc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   7999.4   8023.7  -3995.7   7991.4     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8512 -0.6099 -0.0117  0.6390  4.6304 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.04319  0.2078  
##  Residual             0.70656  0.8406  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 1.771e-01  4.822e-02 2.828e+01   3.673 0.000992 ***
## curHighLow1 6.016e-02  2.978e-02 3.165e+03   2.020 0.043425 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.310
emmeans(q.prc.act_c, list(pairwise ~ curHighLow), adjust = "tukey", pbkrtest.limit = 3188)
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.177 0.0492 29.4   0.0612    0.293
##  1           0.237 0.0492 29.3   0.1214    0.353
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1     -0.0602 0.0298 3166 -2.020  0.0435 
## 
## Degrees-of-freedom method: kenward-roger
## Plot
# Dot as mean
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Anticipatory PRC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_qPRC.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_qPRC.pdf",sep="")
  ggsave(fname)
}

During the anticipatory phase, there was a significant main effect of curiosity in the VTA and the PRC, with a trending effect observed in the PHC. There was no effect of curiosity in the hippocampus.

Univariate activation predicting memory including all ROIs

ROI - Memory (Anticipatory Phase)

# Logistic mixed model: do the four anticipatory ROI betas jointly predict
# later memory (binary outcome), with a random intercept per subject?
q.rois.act_mem<- glmer(Memory ~ Q_VTA_50_noSN_bin_mean_act + Q_HPC_mean_act + Q_PHC_mask_MNI152_bin_mean_act + Q_PRC_mask_MNI152_bin_mean_act + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.act_mem)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ Q_VTA_50_noSN_bin_mean_act + Q_HPC_mean_act + Q_PHC_mask_MNI152_bin_mean_act +  
##     Q_PRC_mask_MNI152_bin_mean_act + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4204.5   4240.9  -2096.2   4192.5     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1584 -0.8965  0.4977  0.8905  1.7504 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.3925   0.6265  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)  
## (Intercept)                     0.05238    0.13635   0.384   0.7009  
## Q_VTA_50_noSN_bin_mean_act      0.12060    0.04932   2.445   0.0145 *
## Q_HPC_mean_act                 -0.05106    0.08087  -0.631   0.5278  
## Q_PHC_mask_MNI152_bin_mean_act -0.02085    0.06682  -0.312   0.7550  
## Q_PRC_mask_MNI152_bin_mean_act  0.03795    0.06859   0.553   0.5801  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_VTA_ Q_HPC_ Q_PHC_
## Q_VTA_50_SN  0.000                     
## Q_HPC_mn_ct -0.029 -0.369              
## Q_PHC__MNI1 -0.011  0.032 -0.571       
## Q_PRC__MNI1 -0.018 -0.040 -0.374 -0.154
# Including an interaction term for VTA
# Likelihood-ratio comparison: does adding a VTA x curiosity interaction
# improve prediction of memory over VTA activation alone?
q.vta_mem_base <- glmer(Memory ~ Q_VTA_50_noSN_bin_mean_act + (1|subj), family=binomial,data=dist_raw)
q.vta_mem_cur <- glmer(Memory ~ Q_VTA_50_noSN_bin_mean_act  + Q_VTA_50_noSN_bin_mean_act:curHighLow + (1|subj), family=binomial,data=dist_raw)
anova(q.vta_mem_base,q.vta_mem_cur)
## Data: dist_raw
## Models:
## q.vta_mem_base: Memory ~ Q_VTA_50_noSN_bin_mean_act + (1 | subj)
## q.vta_mem_cur: Memory ~ Q_VTA_50_noSN_bin_mean_act + Q_VTA_50_noSN_bin_mean_act:curHighLow + (1 | subj)
##                npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## q.vta_mem_base    3 4199.6 4217.8 -2096.8   4193.6                     
## q.vta_mem_cur     4 4199.9 4224.1 -2095.9   4191.9 1.7169  1     0.1901
# Using only low curiosity trials for VTA
# Restrict to low-curiosity trials to test whether the VTA-memory effect
# persists when high-curiosity trials are excluded.
dist_lc <-subset(dist_raw,curHighLow==0)
q.vta_mem_lc<- glmer(Memory ~ Q_VTA_50_noSN_bin_mean_act + (1|subj), family = binomial, data=dist_lc)
summary(q.vta_mem_lc)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ Q_VTA_50_noSN_bin_mean_act + (1 | subj)
##    Data: dist_lc
## 
##      AIC      BIC   logLik deviance df.resid 
##   2032.1   2048.2  -1013.1   2026.1     1586 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.5013 -0.8277 -0.5388  1.0102  2.3952 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4517   0.6721  
## Number of obs: 1589, groups:  subj, 23
## 
## Fixed effects:
##                            Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                -0.48169    0.15069  -3.197  0.00139 **
## Q_VTA_50_noSN_bin_mean_act  0.11581    0.05972   1.939  0.05248 . 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## Q_VTA_50_SN -0.046

When all ROIs are included, only the VTA was a significant predictor of subsequent memory. To examine whether this was purely driven by differences associated with curiosity state, a VTA-only model was compared with a model that additionally included an interaction term. This did not result in a better model fit. A separate model that included only low-curiosity trials showed a continued trend toward a significant effect (p=.052). The inclusion of a random slope for VTA resulted in a singular fit, and as such a random slope was not included.

Control analysis - Including curiosity as a continuous covariate

# Control analysis: VTA activation and the trial-wise curiosity rating
# (z-scored) compete in the same memory model.
q.vta_mem_ctrl <- glmer(Memory ~ Q_VTA_50_noSN_bin_mean_act  + curiosityZ + (1|subj), family=binomial,data=dist_raw)
summary(q.vta_mem_ctrl)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ Q_VTA_50_noSN_bin_mean_act + curiosityZ + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3992.1   4016.4  -1992.1   3984.1     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8227 -0.8393  0.3658  0.8337  2.7596 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4684   0.6844  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                            Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                 0.07192    0.14800   0.486   0.6270    
## Q_VTA_50_noSN_bin_mean_act  0.07620    0.04266   1.786   0.0741 .  
## curiosityZ                  0.47377    0.03371  14.055   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_VTA_
## Q_VTA_50_SN -0.042       
## curiosityZ   0.013 -0.029

When including curiosity rating and VTA activation within a single model, only curiosity rating was a significant predictor. This is unsurprising given that variation in VTA activation should primarily be driven by differences in curiosity.

Plot parameter estimates for univariate activation and memory (Anticipatory)

# Fixed-effect estimates for the four ROI predictors (drop the intercept).
betas<-fixef(q.rois.act_mem)[2:5]
odds<-exp(betas)  # odds ratios
se<-sqrt(diag(vcov(q.rois.act_mem)))[2:5]
# BUG FIX: the delta-method SE of an odds ratio is exp(beta) * SE(beta);
# the previous sqrt(odds^2 * se) mixed SD and variance scales
# (it equals odds * sqrt(se)).
oddse<-odds * se
pdat<-data.frame(betas)
pdat$odds<-odds
pdat$se<-se
pdat$oddse<-oddse

# Display labels per ROI, in the same order as the model coefficients.
# (Renamed from `rownames`, which shadowed base::rownames().)
roi_labels<-c("VTA\nActivation\n",
            "HPC\nActivation\n",
            "PHC\nActivation\n",
            "PRC\nActivation\n")

# Factor-level orderings used by the horizontal and vertical PE plots below.
sortedrownames<-c( "HPC\nActivation\n",
                   "PHC\nActivation\n",
                   "PRC\nActivation\n",
                   "VTA\nActivation\n")

sortedrownames2<-c( "VTA\nActivation\n",
                    "PRC\nActivation\n",
                    "PHC\nActivation\n",
                    "HPC\nActivation\n")


pdat$labels<-roi_labels
pdat$labels <-factor(pdat$labels,levels=sortedrownames)

## Beta on X-axis
ggplot(pdat,aes(x=betas,y=labels)) +
  geom_point(data=pdat, aes(x=betas,y=labels), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(y=labels, xmin=betas-se,xmax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_vline(xintercept=0,linetype="dashed")  +
  labs(y="", x="Parameter Estimate") + 
  coord_cartesian(xlim=c(-0.2,0.2)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_Q_Mem_PE1.pdf",sep="")
  ggsave(fname)
}

pdat$labels <-factor(pdat$labels,levels=sortedrownames2)
## Beta on Y-axis
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_point(data=pdat, aes(x=labels,y=betas), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="dashed")  +
  labs(x="", y="Parameter Estimate") + 
  coord_cartesian(ylim=c(-0.2,0.2)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_Q_Mem_PE2.pdf",sep="")
  ggsave(fname)
}


## Beta on Y-axis (bar graphs)
# Bar version of the parameter-estimate plot; the "*" annotation at x=1 marks
# the first factor level (VTA under the sortedrownames2 ordering), the only
# significant predictor in the full model.
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_bar(data=pdat, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=.6, color="black") + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="solid")  +
  labs(x="", y="Parameter Estimate") + 
  annotate(x=1,y=.2,geom="text",label="*",size=10) +
  coord_cartesian(ylim=c(-0.15,0.25)) +
  theme_classic(base_size=theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_Q_Mem_PE_bar.pdf",sep="")
  ggsave(fname)
}

## Separate PE plot for each ROI
for (i in seq_len(nrow(pdat))){
  pdat_t = pdat[i,]
  # BUG FIX: inside a for loop a ggplot expression is not auto-printed, so
  # last_plot() is never updated and a bare ggsave(fname) silently saves the
  # previously displayed figure (the bar plot above) for every ROI. Keep the
  # plot object, print it explicitly, and pass it to ggsave().
  p <- ggplot(pdat_t,aes(x=labels,y=betas)) +
    geom_bar(data=pdat_t, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=1, color="black") + 
    geom_errorbar(data=pdat_t,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
    geom_hline(yintercept=0,linetype="solid")  +
    labs(x="", y="Parameter Estimate") + 
    #annotate(x=1,y=.2,geom="text",label="*",size=10) +
    coord_cartesian(ylim=c(-0.15,0.25),xlim=c(.5,1.5)) +
    theme_classic(base_size=theme_bs) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),axis.ticks.x=element_blank(), axis.text.x=element_blank())
  print(p)
  # First three characters of the label ("VTA"/"HPC"/"PHC"/"PRC") name the file.
  wroi <- substr(as.character(pdat_t$labels),1,3)
  if (figsave == 1){
    fname <-paste(figfldr,wroi , "_Q_Mem_PE_", plotbar_width, ".pdf",sep="")
    ggsave(fname, plot=p, width=plotbar_width,height=4.5)
  }
}

Slopes for each ROI on predicted memory outcome (Anticipation). Note that for the individual plots, predictions are derived from the full model rather than from separately fitted single-ROI models, so the slopes will not exactly match the parameter estimates from the full model.

# Per-trial fitted memory probabilities from the full ROI model, de-meaned
# within subject so the slope plots show within-subject variation only.
# NOTE(review): assumes fitted(q.rois.act_mem) aligns row-for-row with
# dist_raw, i.e. glmer's na.action dropped no rows — confirm.
wroi <- c("Q_VTA_50_noSN_bin_mean_act","Q_HPC_mean_act","Q_PHC_mask_MNI152_bin_mean_act","Q_PRC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
tmp_dat$FittedMem <- fitted(q.rois.act_mem)
tmp_dat$FittedMem_dm <- tmp_dat$FittedMem - ave(tmp_dat$FittedMem,tmp_dat$subj,FUN=mean)


## VTA
# Per-subject OLS slopes (gray) plus the overall slope (black) of de-meaned
# fitted recall probability against anticipatory VTA activation.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_VTA_50_noSN_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
  geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_VTA_50_noSN_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Anticipatory BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_qVTA-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## HPC
# Same slope plot as for VTA, using anticipatory HPC BOLD.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_HPC_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_HPC_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Anticipatory BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_qHPC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PHC
# Same slope plot, using anticipatory PHC BOLD.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PHC_mask_MNI152_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PHC_mask_MNI152_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Anticipatory BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_qPHC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PRC
# Same slope plot, using anticipatory PRC BOLD.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PRC_mask_MNI152_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PRC_mask_MNI152_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Anticipatory BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

# NOTE(review): unlike the other ROIs, the save is commented out here —
# presumably intentional (figure not used), but verify this was not an
# oversight.
if (figsave == 1){
  #fname <-paste(figfldr, "fMRI_Uni_qPRC-Mem.pdf",sep="")
  #ggsave(fname, device=cairo_pdf)
}

For completeness, the same analysis was repeated for the Answer phase, where the to-be-encoded information was presented.

### VTA

# Answer-phase VTA activation: test whether curiosity (high vs low) modulates
# mean BOLD, using an LMM with a random intercept per subject (ML fit).
wroi <- "A_VTA_50_noSN_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)

# Curiosity only
a.vta.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(a.vta.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   8352.4   8376.7  -4172.2   8344.4     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0197 -0.6253  0.0059  0.6128  5.7056 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.02755  0.166   
##  Residual             0.79207  0.890   
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 2.906e-01  4.122e-02 3.188e+01   7.050 5.52e-08 ***
## curHighLow1 5.993e-02  3.153e-02 3.166e+03   1.901   0.0574 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.384
## Plot
# Dot as mean
# Subject-by-condition means for plotting; `tmp` holds condition means and SEM.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
# Subject dots (colored by condition), gray within-subject lines, and nudged
# condition means with SEM error bars.
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Answer phase VTA BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  # fname <-paste(figfldr, "fMRI_Uni_aVTA.eps",sep="")
  # ggsave(fname, device=cairo_ps)
  fname <-paste(figfldr, "fMRI_Uni_aVTA.pdf",sep="")
  ggsave(fname)
}

HPC

# Answer-phase HPC activation: curiosity LMM and plot (same pipeline as VTA).
wroi <- "A_HPC_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)

# Curiosity only
a.hpc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(a.hpc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   7699.0   7723.3  -3845.5   7691.0     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.4977 -0.5969 -0.0031  0.6037  4.5811 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.03765  0.194   
##  Residual             0.64319  0.802   
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)    0.23152    0.04521   28.51985   5.121  1.9e-05 ***
## curHighLow1   -0.02003    0.02841 3165.34992  -0.705    0.481    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.316
## Plot
# Dot as mean
# Subject-by-condition means and condition-level mean/SEM for plotting.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Answer phase HPC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aHPC.pdf",sep="")
  ggsave(fname)
}

MTL-PHC

# Answer-phase PHC activation: curiosity LMM and plot (same pipeline as VTA).
wroi <- "A_PHC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)

# Curiosity only
a.phc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(a.phc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   8019.2   8043.4  -4005.6   8011.2     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.3949 -0.6165 -0.0042  0.6174  4.9591 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.03764  0.1940  
##  Residual             0.71161  0.8436  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)    0.19484    0.04569   29.05558   4.265 0.000194 ***
## curHighLow1   -0.01678    0.02988 3165.37172  -0.562 0.574438    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.328
## Plot
# Dot as mean
# Subject-by-condition means and condition-level mean/SEM for plotting.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Answer phase PHC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aPHC.pdf",sep="")
  ggsave(fname)
}

MTL-PRC

# Answer-phase PRC activation: curiosity LMM and plot (same pipeline as VTA).
wroi <- "A_PRC_mask_MNI152_bin_mean_act"
tmp_act <- dist_raw[c("subj","curHighLow",wroi)] 
colnames(tmp_act)[3]<-"Act"
tmp_act <- na.omit(tmp_act)

# Curiosity only
a.prc.act_c <-lmer(Act ~ curHighLow + (1|subj), data=tmp_act, REML=F)
summary(a.prc.act_c)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Act ~ curHighLow + (1 | subj)
##    Data: tmp_act
## 
##      AIC      BIC   logLik deviance df.resid 
##   6549.1   6573.3  -3270.5   6541.1     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.4611 -0.5818 -0.0113  0.6072  6.5394 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.01947  0.1395  
##  Residual             0.44927  0.6703  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 1.837e-01  3.363e-02 3.030e+01   5.461 6.14e-06 ***
## curHighLow1 3.624e-02  2.375e-02 3.165e+03   1.526    0.127    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.354
## Plot
# Dot as mean
# Subject-by-condition means and condition-level mean/SEM for plotting.
tmp_act2 <- tmp_act %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_act2 %>% group_by(curHighLow) %>% summarise(act = mean(Act), 
                                                     act_sem = sd(Act)/sqrt(n())
)
## `summarise()` ungrouping output (override with `.groups` argument)
ggplot() +
  geom_point(data=tmp_act2, aes(x = as.numeric(curHighLow), y = Act, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_act2, aes(x=as.numeric(curHighLow), y = Act, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=act), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=act), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
   geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=act-act_sem,ymax=act+act_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="Answer phase PRC BOLD\n(a.u.)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aPRC.pdf",sep="")
  ggsave(fname)
}

Univariate activation predicting memory including all ROIs

ROI - Memory (Answer Phase)

# Mixed-effects logistic regression: Answer-phase activation in all four ROIs
# jointly predicting trial-level memory, with a random intercept per subject.
a.rois.act_mem<- glmer(Memory ~ A_VTA_50_noSN_bin_mean_act + A_HPC_mean_act + A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act + (1|subj), family = binomial, data=dist_raw)
summary(a.rois.act_mem)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ A_VTA_50_noSN_bin_mean_act + A_HPC_mean_act + A_PHC_mask_MNI152_bin_mean_act +  
##     A_PRC_mask_MNI152_bin_mean_act + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4195.6   4232.0  -2091.8   4183.6     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3814 -0.8948  0.4935  0.8886  1.7945 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.3902   0.6246  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                     0.01909    0.13635   0.140   0.8886   
## A_VTA_50_noSN_bin_mean_act      0.01951    0.05104   0.382   0.7022   
## A_HPC_mean_act                 -0.12071    0.08411  -1.435   0.1513   
## A_PHC_mask_MNI152_bin_mean_act  0.08821    0.06744   1.308   0.1908   
## A_PRC_mask_MNI152_bin_mean_act  0.22279    0.07607   2.929   0.0034 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) A_VTA_ A_PHC_ A_PHC_
## A_VTA_50_SN -0.067                     
## A_HPC_mn_ct  0.004 -0.393              
## A_PHC__MNI1 -0.001  0.050 -0.558       
## A_PRC__MNI1 -0.039 -0.038 -0.381 -0.178
# PRC random slope model
# Likelihood-ratio comparison against a model adding a per-subject random
# slope for PRC activation.
a.rois.act_mem2<- glmer(Memory ~ A_VTA_50_noSN_bin_mean_act + A_HPC_mean_act + A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act + (1 + A_PRC_mask_MNI152_bin_mean_act|subj), family = binomial, data=dist_raw)
anova(a.rois.act_mem,a.rois.act_mem2)
## Data: dist_raw
## Models:
## a.rois.act_mem: Memory ~ A_VTA_50_noSN_bin_mean_act + A_HPC_mean_act + A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act + (1 | subj)
## a.rois.act_mem2: Memory ~ A_VTA_50_noSN_bin_mean_act + A_HPC_mean_act + A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act + (1 + A_PRC_mask_MNI152_bin_mean_act | subj)
##                 npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## a.rois.act_mem     6 4195.6 4232.0 -2091.8   4183.6                     
## a.rois.act_mem2    8 4199.0 4247.6 -2091.5   4183.0 0.5832  2     0.7471

Only activation in the PRC was a significant predictor of memory at answer. The inclusion of a random slope for PRC did not result in a better model fit.

# Extract fixed-effect estimates (dropping the intercept) from the
# Answer-phase ROI -> memory model, with odds ratios and their SEs, and build
# the plotting data frame `pdat` with ordered ROI labels.
betas<-fixef(a.rois.act_mem)[2:5]
odds<-exp(betas)
se<-sqrt(diag(vcov(a.rois.act_mem)))[2:5]
# Delta-method SE of the odds ratio: SE(exp(b)) = exp(b) * SE(b).
# (Fix: was sqrt(odds^2 * se) = odds * sqrt(se), which plugs the SE in where
# the variance belongs. oddse is not used by the plots below, so the figures
# are unaffected.)
oddse<-odds * se
pdat<-data.frame(betas)
pdat$odds<-odds
pdat$se<-se
pdat$oddse<-oddse

# ROI labels in model order (VTA, HPC, PHC, PRC); the two sorted vectors give
# the display orders used by the X-axis and Y-axis plots respectively.
# NOTE: `rownames` shadows base::rownames() in this session — kept for
# compatibility with the rest of the notebook.
rownames<-c("VTA\nActivation\n",
            "HPC\nActivation\n",
            "PHC\nActivation\n",
            "PRC\nActivation\n")

sortedrownames<-c( "HPC\nActivation\n",
                   "PHC\nActivation\n",
                   "PRC\nActivation\n",
                   "VTA\nActivation\n")

sortedrownames2<-c( "VTA\nActivation\n",
                    "PRC\nActivation\n",
                    "PHC\nActivation\n",
                    "HPC\nActivation\n")


pdat$labels<-rownames
pdat$labels <-factor(pdat$labels,levels=sortedrownames)

## Beta on X-axis
# Horizontal parameter-estimate plot: point = beta, error bar = +/- 1 SE.
ggplot(pdat,aes(x=betas,y=labels)) +
  geom_point(data=pdat, aes(x=betas,y=labels), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(y=labels, xmin=betas-se,xmax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_vline(xintercept=0,linetype="dashed")  +
  labs(y="", x="Parameter Estimate") + 
  coord_cartesian(xlim=c(-0.2,0.35)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_A_Mem_PE1.pdf",sep="")
  ggsave(fname)
}

# Reorder labels for the vertical version of the parameter-estimate plot.
pdat$labels <-factor(pdat$labels,levels=sortedrownames2)
## Beta on Y-axis
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_point(data=pdat, aes(x=labels,y=betas), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="dashed")  +
  labs(x="", y="Parameter Estimate") + 
  coord_cartesian(ylim=c(-0.2,0.35)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_A_Mem_PE2.pdf",sep="")
  ggsave(fname)
}

## Beta on Y-axis (bar graphs)
# Bar version of the parameter-estimate plot; "**" marks the significant PRC
# effect (second position in the sortedrownames2 ordering).
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_bar(data=pdat, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=.6, color="black") + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="solid")  +
  labs(x="", y="Parameter Estimate") + 
  coord_cartesian(ylim=c(-0.2,0.4)) +
  annotate(x=2,y=.35,geom="text",label="**",size=10) +
  theme_classic(base_size=theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_A_Mem_PE_bar.pdf",sep="")
  ggsave(fname)
}

## Separate PE plot for each ROI
# Fix: ggplot objects are NOT auto-printed inside a for loop, so ggsave()'s
# default of last_plot() would silently save a stale figure on every
# iteration. Build the plot into `p` and pass it to ggsave() explicitly.
for (i in seq_len(nrow(pdat))){
  pdat_t <- pdat[i,]
  p <- ggplot(pdat_t,aes(x=labels,y=betas)) +
    geom_bar(data=pdat_t, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=1, color="black") + 
    geom_errorbar(data=pdat_t,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
    geom_hline(yintercept=0,linetype="solid")  +
    labs(x="", y="Parameter Estimate") + 
    coord_cartesian(ylim=c(-0.2,0.4),xlim=c(.5,1.5)) +
    theme_classic(base_size=theme_bs) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),axis.ticks.x=element_blank(), axis.text.x=element_blank())
  # First 3 characters of the ROI label (e.g. "VTA") for the filename.
  # as.character(): substr() requires a character vector; labels is a factor.
  wroi <- substr(as.character(pdat_t$labels),1,3)
  if (figsave == 1){
    fname <-paste(figfldr,wroi , "_A_Mem_PE_", plotbar_width, ".pdf",sep="")
    ggsave(fname, plot=p, width=plotbar_width,height=4.5)
  }
}

Slopes for each ROI on predicted memory outcome (Answer). Note that for the individual plots, prediction is based on the full model and is not fitted separately for each individual ROI model, so the slope will not exactly match the parameter estimate from the full model.

# ROI regressors from the Answer phase used in the full memory model
# a.rois.act_mem.
wroi <- c(
  "A_VTA_50_noSN_bin_mean_act",
  "A_HPC_mean_act",
  "A_PHC_mask_MNI152_bin_mean_act",
  "A_PRC_mask_MNI152_bin_mean_act"
)
# Keep identifiers, condition, outcome and ROI activations, then attach the
# model-fitted recall probabilities plus a within-subject demeaned version
# (FittedMem_dm) used for the slope plots below.
tmp_dat <- dist_raw[c("subj", "curHighLow", "Memory", wroi)]
tmp_dat$FittedMem <- fitted(a.rois.act_mem)
subj_mean_fit <- ave(tmp_dat$FittedMem, tmp_dat$subj, FUN = mean)
tmp_dat$FittedMem_dm <- tmp_dat$FittedMem - subj_mean_fit

## VTA
# Per-subject slopes (gray) and group slope (black) of within-subject demeaned
# fitted recall probability against Answer-phase VTA BOLD.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_VTA_50_noSN_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_VTA_50_noSN_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.2,.2)) +
  labs(y="Δ Probability of Recall", x="Answer Phase BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aVTA-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## HPC
# Same slope plot, using Answer-phase HPC BOLD.
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_HPC_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_HPC_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.2,.2)) +
  labs(y="Δ Probability of Recall", x="Answer Phase BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aHPC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PHC
# Same slope plot, using Answer-phase PHC BOLD.
# Fix: this section previously plotted A_PRC_mask_MNI152_bin_mean_act and
# saved to "fMRI_Uni_aPRC-Mem.pdf", duplicating the PRC panel below and
# overwriting its output file; corrected to the PHC regressor and aPHC
# filename (matching the Question-phase PHC section).
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_PHC_mask_MNI152_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_PHC_mask_MNI152_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.2,.2)) +
  labs(y="Δ Probability of Recall", x="Answer Phase BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aPHC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PRC
# Same slope plot, using Answer-phase PRC BOLD (the only significant
# predictor in the full Answer-phase model).
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x =A_PRC_mask_MNI152_bin_mean_act, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = A_PRC_mask_MNI152_bin_mean_act, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.2,.2)) +
  labs(y="Δ Probability of Recall", x="Answer Phase BOLD (a.u)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Uni_aPRC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

Convergence Analysis - Question

While anticipatory univariate HPC activity was not influenced by curiosity or subsequent memory, there may still be differences in distributed patterns of activation.

Prior work has shown that MTL & HPC activity prior to learning can influence subsequent learning outcome. It has been proposed that activity in the HPC may spontaneously fluctuate in and out of a ‘learning state’. However, little is known about how such learning state may be represented by multivariate activities within the MTL/HPC, and how they are influenced by neuromodulatory inputs from the VTA.

Anna Karenina principle - Successful encoding is likely to arise from the convergence of multiple factors, while failed encoding can occur due to a multitude of factors. To this end, we expect that anticipatory states related to successful encoding should occupy a more central position in state space (i.e. greater ‘convergence’), while anticipatory states leading to failed encoding should exhibit greater entropy and occupy more peripheral positions in state space.

For this analysis, activation for each trial is assumed to occupy a single point in n-dimensional state space. To examine the ‘convergence’ of each point, the correlation distance of each point (trial) is computed relative to a centroid (computed based on all other trials from a different run). It is expected that points occupying a central position would have a lower average distance (to all other points).

To examine if convergence/centrality in state space is related to subsequent memory, we ran a similar analysis examining the influence of curiosity on representational distance, and to examine if this was predictive of subsequent learning, a mixed-effects logistic regression was conducted on the trial-by-trial estimates with representational distance as a predictor of subsequent memory.

Note: All analyses are performed on the non Z-scored distances, and z-scoring of the distances are plotted for visualization purposes.

HPC (Curiosity)

# Anticipatory (Question-phase) HPC pattern distance-to-centroid by curiosity.
# The LMM is fit on raw distances; DistZ (within-subject z-scored) is used for
# visualization only.
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

q.hpc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1495.5  -1471.2    751.7  -1503.5     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3762 -0.6942 -0.0808  0.6346  3.8490 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.00458  0.06767 
##  Residual             0.03577  0.18913 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)    0.83462    0.01489   25.53887   56.04   <2e-16 ***
## curHighLow1   -0.01688    0.00670 3165.11001   -2.52   0.0118 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.226
# Pairwise condition contrast (Kenward-Roger df); pbkrtest.limit raised to
# cover all 3188 observations.
emmeans(q.hpc.dist_cur, list(pairwise ~ curHighLow), adjust = "tukey",pbkrtest.limit = 3188)
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.835 0.0152 26.7    0.799    0.871
##  1           0.818 0.0152 26.6    0.782    0.854
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate     SE   df t.ratio p.value
##  0 - 1      0.0169 0.0067 3166 2.520   0.0118 
## 
## Degrees-of-freedom method: kenward-roger
### Plot
# Collapse to subject-by-condition means, then condition-level mean/SEM of the
# z-scored distances for the figure.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = DistZ, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=distz), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=distz), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="HPC\nDistance from centroid (z)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qHPC_cur.pdf",sep="")
  ggsave(fname)
}

We see a significant main effect of curiosity in the HPC with lower distances in High curiosity than low curiosity.

Additional plot for non-normalized HPC

# Same HPC distance plot as above, but on raw (non-z-scored) distances; data
# are rebuilt because the previous chunk overwrote tmp_dat with summaries.
# Saved to the revision-figure folder (revfigfldr).
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

### Plot
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = Dist, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = Dist, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=dist), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=dist), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="HPC\nDistance from centroid", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(revfigfldr, "fMRI_Dist_qHPC_cur_nonZ.pdf",sep="")
  ggsave(fname)
}

MTL-PHC (Curiosity)

## ROI: parahippocampal cortex (PHC), question/anticipation phase.
## Trial-level mixed model testing whether curiosity (High vs Low) modulates
## distance-to-centroid, followed by a within-subject z-scored summary plot.
wroi <- c("Q_PHC_mask_MNI152_bin_p2c","Q_PHC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
# Within-subject z-score: ave() applies scale() separately for each subject.
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

# Distance ~ curiosity with a random intercept per subject (ML fit, REML=F).
q.phc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F)
summary(q.phc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##   1811.9   1836.2   -901.9   1803.9     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1160 -0.7641 -0.1293  0.6654  4.1052 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.01223  0.1106  
##  Residual             0.10099  0.3178  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  7.935e-01  2.441e-02  2.577e+01  32.509   <2e-16 ***
## curHighLow1 -9.061e-03  1.126e-02  3.165e+03  -0.805    0.421    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.232
### Plot
# Collapse trials to one mean per subject x curiosity cell.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
# Condition means and between-subject SEMs (raw and z-scored distance).
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
# Subject-level z-scored points joined by gray lines; condition means and
# SEM error bars are nudged outward so they do not overlap the subject data.
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = DistZ, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=distz), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=distz), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="PHC\nDistance from centroid (z)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

# figsave / figfldr are toggled in the notebook's initialization chunk.
if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qPHC_cur.pdf",sep="")
  ggsave(fname)
}

No significant effect of curiosity on PHC.

Additional plot for non-normalized PHC

wroi <- c("Q_PHC_mask_MNI152_bin_p2c","Q_PHC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

### Plot
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = Dist, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = Dist, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=dist), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=dist), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="PHC\nDistance from centroid", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

if (figsave == 1){
  fname <-paste(revfigfldr, "fMRI_Dist_qPHC_cur_nonZ.pdf",sep="")
  ggsave(fname)
}

MTL-PRC (Curiosity)

## ROI: perirhinal cortex (PRC), question/anticipation phase.
## Trial-level mixed model testing whether curiosity (High vs Low) modulates
## distance-to-centroid, followed by a within-subject z-scored summary plot.
wroi <- c("Q_PRC_mask_MNI152_bin_p2c","Q_PRC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
# Within-subject z-score: ave() applies scale() separately for each subject.
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

# Distance ~ curiosity with a random intercept per subject (ML fit, REML=F).
q.prc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F)
summary(q.prc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##   1208.1   1232.3   -600.0   1200.1     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.2108 -0.7491 -0.0805  0.6877  3.7408 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.007981 0.08933 
##  Residual             0.083696 0.28930 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 8.500e-01  2.000e-02 2.644e+01  42.498   <2e-16 ***
## curHighLow1 2.752e-03  1.025e-02 3.165e+03   0.269    0.788    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.257
### Plot
# Collapse trials to one mean per subject x curiosity cell.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
# Condition means and between-subject SEMs (raw and z-scored distance).
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
# Subject-level z-scored points joined by gray lines; condition means and
# SEM error bars are nudged outward so they do not overlap the subject data.
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = DistZ, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=distz), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=distz), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=distz-distz_sem,ymax=distz+distz_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="PRC\nDistance from centroid (z)", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

# figsave / figfldr are toggled in the notebook's initialization chunk.
if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qPRC_cur.pdf",sep="")
  ggsave(fname)
}

No significant effect of curiosity in the PRC.

Additional plot for non-normalized PRC

## Supplementary figure: raw (non z-scored) PRC distance-to-centroid by curiosity.
wroi <- c("Q_PRC_mask_MNI152_bin_p2c","Q_PRC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
# Within-subject z-score: ave() applies scale() separately for each subject.
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)

### Plot
# Collapse trials to one mean per subject x curiosity cell.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
# Condition means and between-subject SEMs (raw and z-scored distance).
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean
# Same layout as the z-scored figure, but plotting raw Dist values.
ggplot() +
  geom_point(data=tmp_dat, aes(x = as.numeric(curHighLow), y = Dist, colour = curHighLow), size = sub_dotsize, shape = 20,alpha=sub_dotalpha,show.legend=F) + 
  scale_color_manual(values=c(lcolor1,hcolor1),breaks=c("0","1"),labels=c("Low","High")) + 
  geom_line(data=tmp_dat, aes(x=as.numeric(curHighLow), y = Dist, group=subj),  color = 'lightgray', alpha = line_alpha) + 
  geom_point(data=tmp %>% filter(curHighLow=="0"), aes(x=curHighLow,y=dist), position = position_nudge(x = +.35), color=lcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_point(data=tmp %>% filter(curHighLow=="1"), aes(x=curHighLow,y=dist), position = position_nudge(x = -.35), color=hcolor1, size= mean_dotsize, alpha=mean_dotalpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="0") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = +.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  geom_errorbar(data=tmp %>% filter(curHighLow=="1") ,aes(x=curHighLow, ymin=dist-dist_sem,ymax=dist+dist_sem),position = position_nudge(x = -.35), width=erbar_width,lwd=erbar_thick,alpha=erbar_alpha) +
  guides(fill=guide_legend(title="Curiosity")) +
  labs(y="PRC\nDistance from centroid", x="",color="Curiosity") + 
  scale_x_discrete(name = "Curiosity", labels=c("High","Low"), breaks=c("1","0"),expand=c(.3,.3)) + 
  theme_classic(base_size=theme_bs)

# figsave / revfigfldr are toggled in the notebook's initialization chunk.
if (figsave == 1){
  fname <-paste(revfigfldr, "fMRI_Dist_qPRC_cur_nonZ.pdf",sep="")
  ggsave(fname)
}

Across the MTL ROIs, only the hippocampus showed a significant difference in convergence between High and Low curiosity.

Mixed logistic regression to predict memory using representation distance - Question

## Mixed logistic regression: predict subsequent memory (Memory, 0/1) from
## trial-wise distance-to-centroid in all three MTL ROIs simultaneously
## (anticipation phase), with a random intercept per subject.
q.rois.dist_mem <- glmer(Memory ~Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.dist_mem)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c +  
##     (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4200.8   4231.1  -2095.4   4190.8     3183 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1713 -0.8965  0.4903  0.8956  1.7740 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.3858   0.6211  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                           Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                0.50570    0.22980   2.201  0.02777 * 
## Q_HPC_p2c                 -0.54570    0.20623  -2.646  0.00814 **
## Q_PHC_mask_MNI152_bin_p2c -0.06023    0.12212  -0.493  0.62190   
## Q_PRC_mask_MNI152_bin_p2c  0.06181    0.13001   0.475  0.63447   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_HPC_ Q_PHC_
## Q_HPC_p2c   -0.568              
## Q_PHC__MNI1 -0.164 -0.272       
## Q_PRC__MNI1 -0.344 -0.123 -0.111
# Controlling for univariate activation in HPC
# Checks that the HPC distance effect is not just mean activation in disguise.
q.rois.dist_mem2 <- glmer(Memory ~Q_HPC_p2c + Q_HPC_mean_act + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.dist_mem2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ Q_HPC_p2c + Q_HPC_mean_act + Q_PHC_mask_MNI152_bin_p2c +  
##     Q_PRC_mask_MNI152_bin_p2c + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4202.7   4239.1  -2095.4   4190.7     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1684 -0.8958  0.4902  0.8952  1.7731 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.3858   0.6211  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                            Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                0.513444   0.241141   2.129  0.03324 * 
## Q_HPC_p2c                 -0.548605   0.208040  -2.637  0.00836 **
## Q_HPC_mean_act            -0.005198   0.049013  -0.106  0.91553   
## Q_PHC_mask_MNI152_bin_p2c -0.063620   0.126225  -0.504  0.61425   
## Q_PRC_mask_MNI152_bin_p2c  0.060096   0.131017   0.459  0.64646   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_HPC_2 Q_HPC__ Q_PHC_
## Q_HPC_p2c   -0.577                       
## Q_HPC_mn_ct -0.303  0.131                
## Q_PHC__MNI1 -0.228 -0.228   0.253        
## Q_PRC__MNI1 -0.362 -0.105   0.124  -0.076
# Include curiosity as interaction term
# Likelihood-ratio test: does adding the HPC distance x curiosity interaction
# improve on the HPC-distance-only model?
q.hpc.dist_mem_base <- glmer(Memory ~Q_HPC_p2c + (1|subj), family = binomial, data=dist_raw)
q.hpc.dist_mem_cur <- glmer(Memory ~Q_HPC_p2c + Q_HPC_p2c:curHighLow + (1|subj), family = binomial, data=dist_raw)
anova(q.hpc.dist_mem_base,q.hpc.dist_mem_cur)
## Data: dist_raw
## Models:
## q.hpc.dist_mem_base: Memory ~ Q_HPC_p2c + (1 | subj)
## q.hpc.dist_mem_cur: Memory ~ Q_HPC_p2c + Q_HPC_p2c:curHighLow + (1 | subj)
##                     npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## q.hpc.dist_mem_base    3 4197.2 4215.4 -2095.6   4191.2                     
## q.hpc.dist_mem_cur     4 4007.9 4032.2 -2000.0   3999.9 191.28  1  < 2.2e-16
##                        
## q.hpc.dist_mem_base    
## q.hpc.dist_mem_cur  ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(q.hpc.dist_mem_cur)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ Q_HPC_p2c + Q_HPC_p2c:curHighLow + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4007.9   4032.2  -1999.9   3999.9     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7124 -0.8626  0.3712  0.8442  2.5069 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4452   0.6673  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                       Estimate Std. Error z value Pr(>|z|)    
## (Intercept)            0.44165    0.22088   1.999   0.0456 *  
## Q_HPC_p2c             -1.07628    0.20691  -5.202 1.98e-07 ***
## Q_HPC_p2c:curHighLow1  1.23906    0.09174  13.506  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_HPC_p2
## Q_HPC_p2c   -0.738         
## Q_HPC_2:HL1 -0.020 -0.194
# Simple slopes of HPC distance on memory within each curiosity condition.
emSlope <- emtrends(q.hpc.dist_mem_cur, "curHighLow",var="Q_HPC_p2c")
emSlope
##  curHighLow Q_HPC_p2c.trend    SE  df asymp.LCL asymp.UCL
##  0                   -1.076 0.207 Inf    -1.482    -0.671
##  1                    0.163 0.209 Inf    -0.248     0.573
## 
## Confidence level used: 0.95
# Pairwise contrast of the Low vs High curiosity slopes.
pairs(emSlope)
##  contrast estimate     SE  df z.ratio p.value
##  0 - 1       -1.24 0.0917 Inf -13.506 <.0001
# Full model: all three ROIs plus the HPC distance x curiosity interaction.
q.rois.dist_mem3 <- glmer(Memory ~Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c + Q_HPC_p2c:curHighLow + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.dist_mem3)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c +  
##     Q_HPC_p2c:curHighLow + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4011.6   4048.0  -1999.8   3999.6     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7195 -0.8629  0.3704  0.8440  2.4973 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4465   0.6682  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                           Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                0.44407    0.24021   1.849   0.0645 .  
## Q_HPC_p2c                 -1.05238    0.21752  -4.838 1.31e-06 ***
## Q_PHC_mask_MNI152_bin_p2c -0.06656    0.12632  -0.527   0.5983    
## Q_PRC_mask_MNI152_bin_p2c  0.03576    0.13432   0.266   0.7900    
## Q_HPC_p2c:curHighLow1      1.23889    0.09175  13.503  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_HPC_p2 Q_PHC_ Q_PRC_
## Q_HPC_p2c   -0.548                       
## Q_PHC__MNI1 -0.161 -0.268                
## Q_PRC__MNI1 -0.337 -0.122   -0.110       
## Q_HPC_2:HL1 -0.012 -0.181   -0.005 -0.014

When including representation distance of all 3 MTL ROIs, only the HPC was a significant predictor. The HPC remained a significant predictor after controlling for univariate activation.

The model that included curiosity as an interaction term led to a better model fit, suggesting a differential effect of HPC representation on memory in High and Low curiosity states. Follow-up comparisons showed that convergence was a stronger predictor of memory in the Low curiosity condition. This may be driven by greater variability/range during low curiosity, whereas convergence is generally higher in the high curiosity condition.

Inclusion of a HPC random slope term resulted in singular fit, and as such a random slope was not included.

Control analysis - Including curiosity as a continuous covariate

## Control analysis: does HPC convergence predict memory over and above
## continuous (z-scored) curiosity ratings, rather than the High/Low split?
q.rois.dist_mem_ctrl <- glmer(Memory ~Q_HPC_p2c + curiosityZ + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.dist_mem_ctrl)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Memory ~ Q_HPC_p2c + curiosityZ + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3990.6   4014.8  -1991.3   3982.6     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.0260 -0.8387  0.3637  0.8289  2.7749 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4624   0.68    
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.44717    0.22277   2.007   0.0447 *  
## Q_HPC_p2c   -0.44036    0.20240  -2.176   0.0296 *  
## curiosityZ   0.47303    0.03372  14.027   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##            (Intr) Q_HPC_
## Q_HPC_p2c  -0.751       
## curiosityZ -0.015  0.031

As a control, we also used curiosity ratings (instead of category) as a covariate. HPC convergence remained a significant predictor of memory after controlling for curiosity ratings, suggesting that the association of HPC convergence with memory was not purely driven by differences in curiosity.

Plot parameter estimates for distance and memory without interaction (Anticipation)

## Extract the fixed-effect slopes (rows 2-4: HPC/PHC/PRC distance terms)
## from the memory model and convert them to odds ratios for plotting.
betas <- fixef(q.rois.dist_mem)[2:4]
odds <- exp(betas)
se <- sqrt(diag(vcov(q.rois.dist_mem)))[2:4]
## Delta-method SE of the odds ratio: SE(exp(b)) = exp(b) * SE(b).
## (Fixed: was sqrt(odds^2 * se), which uses the SE where the variance
## belongs; unused by the plots below, which show betas +/- se.)
oddse <- odds * se
pdat <- data.frame(betas)
pdat$odds <- odds
pdat$se <- se
pdat$oddse <- oddse

## ROI labels for the parameter-estimate plots.
## Renamed from "rownames" to avoid masking base::rownames().
roi_labels <- c(
  "HPC\nDistance\n",
  "PHC\nDistance\n",
  "PRC\nDistance\n")

# Factor-level order for the horizontal (beta on x-axis) plot.
sortedrownames <- c("HPC\nDistance\n",
                    "PHC\nDistance\n",
                    "PRC\nDistance\n")

# Reversed order, used below for the vertical (beta on y-axis) plots.
sortedrownames2 <- c(
  "PRC\nDistance\n",
  "PHC\nDistance\n",
  "HPC\nDistance\n")


pdat$labels <- roi_labels
pdat$labels <- factor(pdat$labels, levels = sortedrownames)

## Beta on X-axis
# Parameter estimates (log-odds) with +/- 1 SE error bars; dashed line at 0.
ggplot(pdat,aes(x=betas,y=labels)) +
  geom_point(data=pdat, aes(x=betas,y=labels), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(y=labels, xmin=betas-se,xmax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_vline(xintercept=0,linetype="dashed")  +
  labs(y="", x="Parameter Estimate") + 
  coord_cartesian(xlim=c(-0.9,0.5)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_Q_Mem_PE1.pdf",sep="")
  ggsave(fname)
}

# Re-level labels so the vertical plot lists HPC last (rightmost/topmost).
pdat$labels <-factor(pdat$labels,levels=sortedrownames2)
## Beta on Y-axis
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_point(data=pdat, aes(x=labels,y=betas), size = mean_dotsize, shape = 20,alpha=1,show.legend=F) + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="dashed")  +
  labs(x="", y="Parameter Estimate") + 
  coord_cartesian(ylim=c(-0.9,0.5)) +
  theme_bw(base_size=theme_bs) 

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_Q_Mem_PE2.pdf",sep="")
  ggsave(fname)
}

## Beta on Y-axis (bar graphs)
# Bar version; "**" annotation marks the significant HPC term (x position 3).
ggplot(pdat,aes(x=labels,y=betas)) +
  geom_bar(data=pdat, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=.6, color="black") + 
  geom_errorbar(data=pdat,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
  geom_hline(yintercept=0,linetype="solid")  +
  labs(x="", y="Parameter Estimate") + 
  coord_cartesian(ylim=c(-0.9,0.5)) +
  annotate(x=3,y= -.9, geom="text",label="**",size=10) +
  theme_classic(base_size=theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_Q_Mem_PE_bar.pdf",sep="")
  ggsave(fname)
}

## Separate PE plot for each ROI
## NOTE(fix): inside a for loop a bare ggplot() call is NOT auto-printed, so
## ggsave()'s default plot = last_plot() would save a stale figure on every
## iteration. Assign the plot, print it, and pass it to ggsave() explicitly.
for (i in seq_len(nrow(pdat))){
  pdat_t <- pdat[i,]
  p <- ggplot(pdat_t,aes(x=labels,y=betas)) +
    geom_bar(data=pdat_t, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=1, color="black") + 
    geom_errorbar(data=pdat_t,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
    geom_hline(yintercept=0,linetype="solid")  +
    labs(x="", y="Parameter Estimate") + 
    coord_cartesian(ylim=c(-0.9,0.5),xlim=c(.5,1.5)) +
    theme_classic(base_size=theme_bs) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),axis.ticks.x=element_blank(), axis.text.x=element_blank())
  print(p)
  # First three characters of the label ("HPC"/"PHC"/"PRC") name the file.
  wroi <- substr(pdat_t$labels,1,3)
  if (figsave == 1){
    fname <- paste(figfldr,wroi , "_Dist_Q_Mem_PE_", plotbar_width, ".pdf",sep="")
    ggsave(fname, plot=p, width=plotbar_width,height=4.5)
  }
}

Slopes for each ROI on predicted memory outcome (Anticipation)

## Visualize model-implied memory slopes: per-subject fitted recall
## probabilities (demeaned within subject) against each ROI's z-scored
## distance, with a group-level fit line on top.
wroi <- c("Q_HPC_p2c", "Q_PHC_mask_MNI152_bin_p2c", "Q_PRC_mask_MNI152_bin_p2c")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
# Fitted probabilities from the 3-ROI memory model above.
tmp_dat$FittedMem <- fitted(q.rois.dist_mem)
# Demean fitted probabilities within subject to remove intercept differences.
tmp_dat$FittedMem_dm <- tmp_dat$FittedMem - ave(tmp_dat$FittedMem,tmp_dat$subj,FUN=mean)

## HPC
# Within-subject z-score of HPC distance for the x-axis.
tmp_dat$HPC_p2cZ <- ave(tmp_dat$Q_HPC_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = HPC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = HPC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

# cairo_pdf is needed to embed the Unicode Δ in the axis label.
if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qHPC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PHC
tmp_dat$PHC_p2cZ <- ave(tmp_dat$Q_PHC_mask_MNI152_bin_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PHC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PHC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qPHC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

## PRC
tmp_dat$PRC_p2cZ <- ave(tmp_dat$Q_PRC_mask_MNI152_bin_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PRC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PRC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_qPRC-Mem.pdf",sep="")
  ggsave(fname, device=cairo_pdf)
}

Slopes for each ROI on predicted memory outcome (Anticipation) - Non Z-scored

## Same fitted-memory slope figures as above, but with raw (non z-scored)
## distance on the x-axis (supplementary versions saved to revfigfldr).
wroi <- c("Q_HPC_p2c", "Q_PHC_mask_MNI152_bin_p2c", "Q_PRC_mask_MNI152_bin_p2c")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
# Fitted probabilities from the 3-ROI memory model, demeaned within subject.
tmp_dat$FittedMem <- fitted(q.rois.dist_mem)
tmp_dat$FittedMem_dm <- tmp_dat$FittedMem - ave(tmp_dat$FittedMem,tmp_dat$subj,FUN=mean)

## HPC
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_HPC_p2c, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_HPC_p2c, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

# cairo_pdf is needed to embed the Unicode Δ in the axis label.
if (figsave == 1){
   fname <-paste(revfigfldr, "fMRI_Dist_qHPC-Mem_nonZ.pdf",sep="")
   ggsave(fname, device=cairo_pdf)
}

## PHC
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PHC_mask_MNI152_bin_p2c, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PHC_mask_MNI152_bin_p2c, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
   fname <-paste(revfigfldr, "fMRI_Dist_qPHC-Mem_nonZ.pdf",sep="")
   ggsave(fname, device=cairo_pdf)
}

## PRC
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PRC_mask_MNI152_bin_p2c, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = Q_PRC_mask_MNI152_bin_p2c, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.1,.1)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
   fname <-paste(revfigfldr, "fMRI_Dist_qPRC-Mem_nonZ.pdf",sep="")
   ggsave(fname, device=cairo_pdf)
}

Just as with the univariate data, the same analyses were repeated for the answer phase.

## Convergence Analysis - Answer

### HPC (Curiosity)

wroi <- c("A_HPC_p2c","A_HPC_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)
tmp_dat$ActZ <- ave(tmp_dat$Act,tmp_dat$subj,FUN=scale)


a.hpc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F)
summary(a.hpc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##   -468.6   -444.4    238.3   -476.6     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4444 -0.7339 -0.0638  0.6732  3.4617 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.003561 0.05967 
##  Residual             0.049557 0.22261 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.426e-01  1.365e-02  2.754e+01  61.747   <2e-16 ***
## curHighLow1 -2.511e-03  7.886e-03  3.165e+03  -0.318     0.75    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.290
### Plot
# Collapse to one value per subject x curiosity cell (subject means), then
# compute group means and between-subject SEMs for plotting.
# NOTE: summarise_all() is superseded by across() in dplyr >= 1.0.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean: subject-level dots with paired lines, plus group means
# (nudged inward) and between-subject SEM error bars.
ggplot() +
  geom_point(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow),
    size = sub_dotsize, shape = 20, alpha = sub_dotalpha, show.legend = FALSE
  ) +
  scale_color_manual(
    values = c(lcolor1, hcolor1), breaks = c("0", "1"),
    labels = c("Low", "High")
  ) +
  geom_line(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, group = subj),
    color = "lightgray", alpha = line_alpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "0"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = 0.35), color = lcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "1"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = -0.35), color = hcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "0"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = 0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "1"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = -0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  guides(fill = guide_legend(title = "Curiosity")) +
  labs(y = "HPC\nDistance from centroid (z)", x = "", color = "Curiosity") +
  scale_x_discrete(
    name = "Curiosity", labels = c("High", "Low"), breaks = c("1", "0"),
    expand = c(.3, .3)
  ) +
  theme_classic(base_size = theme_bs)

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_Dist_aHPC_cur.pdf")
  ggsave(fname)
}

There was no effect of curiosity in the HPC during answer.

MTL-PHC (Curiosity)

# --- Answer phase, PHC: curiosity effect on distance-to-centroid ---
wroi <- c("A_PHC_mask_MNI152_bin_p2c","A_PHC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
# Within-subject z-scores, per subject via ave()
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)
tmp_dat$ActZ <- ave(tmp_dat$Act,tmp_dat$subj,FUN=scale)


# Fit and test in one expression: anova() reports the F-test for curiosity
anova(a.phc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F))
## Type III Analysis of Variance Table with Satterthwaite's method
##              Sum Sq  Mean Sq NumDF  DenDF F value Pr(>F)
## curHighLow 0.032752 0.032752     1 3165.2  0.3431 0.5581
summary(a.phc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##   1627.4   1651.6   -809.7   1619.4     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1233 -0.7409 -0.1317  0.6612  3.4471 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.009067 0.09522 
##  Residual             0.095464 0.30897 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 8.260e-01  2.133e-02 2.648e+01  38.732   <2e-16 ***
## curHighLow1 6.411e-03  1.095e-02 3.165e+03   0.586    0.558    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.258
### Plot
# Subject means per curiosity cell, then group means and between-subject SEMs.
# NOTE: summarise_all() is superseded by across() in dplyr >= 1.0.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean: subject-level dots with paired lines, plus group means
# (nudged inward) and between-subject SEM error bars.
ggplot() +
  geom_point(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow),
    size = sub_dotsize, shape = 20, alpha = sub_dotalpha, show.legend = FALSE
  ) +
  scale_color_manual(
    values = c(lcolor1, hcolor1), breaks = c("0", "1"),
    labels = c("Low", "High")
  ) +
  geom_line(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, group = subj),
    color = "lightgray", alpha = line_alpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "0"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = 0.35), color = lcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "1"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = -0.35), color = hcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "0"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = 0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "1"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = -0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  guides(fill = guide_legend(title = "Curiosity")) +
  labs(y = "PHC\nDistance from centroid (z)", x = "", color = "Curiosity") +
  scale_x_discrete(
    name = "Curiosity", labels = c("High", "Low"), breaks = c("1", "0"),
    expand = c(.3, .3)
  ) +
  theme_classic(base_size = theme_bs)

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_Dist_aPHC_cur.pdf")
  ggsave(fname)
}

No effect of curiosity in the PHC.

MTL-PRC (Curiosity)

# --- Answer phase, PRC: curiosity effect on distance-to-centroid ---
wroi <- c("A_PRC_mask_MNI152_bin_p2c","A_PRC_mask_MNI152_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow",wroi)]
colnames(tmp_dat)[3] <- "Dist"
colnames(tmp_dat)[4] <- "Act"
# Within-subject z-scores, per subject via ave()
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)
tmp_dat$ActZ <- ave(tmp_dat$Act,tmp_dat$subj,FUN=scale)


# Fit and test in one expression: anova() reports the F-test for curiosity
anova(a.prc.dist_cur <- lmer(Dist ~ curHighLow + (1|subj),data = tmp_dat, REML=F))
## Type III Analysis of Variance Table with Satterthwaite's method
##              Sum Sq  Mean Sq NumDF  DenDF F value Pr(>F)
## curHighLow 0.069457 0.069457     1 3165.5   0.833 0.3615
summary(a.prc.dist_cur)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##   1178.9   1203.2   -585.5   1170.9     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3285 -0.7536 -0.0891  0.7006  3.2311 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.003432 0.05859 
##  Residual             0.083384 0.28876 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.637e-01  1.421e-02  3.068e+01  60.766   <2e-16 ***
## curHighLow1 -9.336e-03  1.023e-02  3.165e+03  -0.913    0.361    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## curHighLow1 -0.361
### Plot
# Subject means per curiosity cell, then group means and between-subject SEMs.
# NOTE: summarise_all() is superseded by across() in dplyr >= 1.0.
tmp_dat <- tmp_dat %>% group_by(subj,curHighLow) %>% summarise_all(mean)
tmp = tmp_dat %>% group_by(curHighLow) %>% summarise(dist = mean(Dist), 
                                                 dist_sem = sd(Dist)/sqrt(n()),
                                                 distz = mean(DistZ),
                                                 distz_sem = sd(DistZ)/sqrt(n())
) 
## `summarise()` ungrouping output (override with `.groups` argument)
# Dot as mean: subject-level dots with paired lines, plus group means
# (nudged inward) and between-subject SEM error bars.
ggplot() +
  geom_point(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, colour = curHighLow),
    size = sub_dotsize, shape = 20, alpha = sub_dotalpha, show.legend = FALSE
  ) +
  scale_color_manual(
    values = c(lcolor1, hcolor1), breaks = c("0", "1"),
    labels = c("Low", "High")
  ) +
  geom_line(
    data = tmp_dat,
    aes(x = as.numeric(curHighLow), y = DistZ, group = subj),
    color = "lightgray", alpha = line_alpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "0"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = 0.35), color = lcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_point(
    data = filter(tmp, curHighLow == "1"), aes(x = curHighLow, y = distz),
    position = position_nudge(x = -0.35), color = hcolor1,
    size = mean_dotsize, alpha = mean_dotalpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "0"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = 0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  geom_errorbar(
    data = filter(tmp, curHighLow == "1"),
    aes(x = curHighLow, ymin = distz - distz_sem, ymax = distz + distz_sem),
    position = position_nudge(x = -0.35), width = erbar_width,
    lwd = erbar_thick, alpha = erbar_alpha
  ) +
  guides(fill = guide_legend(title = "Curiosity")) +
  labs(y = "PRC\nDistance from centroid (z)", x = "", color = "Curiosity") +
  scale_x_discrete(
    name = "Curiosity", labels = c("High", "Low"), breaks = c("1", "0"),
    expand = c(.3, .3)
  ) +
  theme_classic(base_size = theme_bs)

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_Dist_aPRC_cur.pdf")
  ggsave(fname)
}

No effect of curiosity in the PRC.

Across all 3 MTL ROIs, there was no effect of curiosity on convergence during the presentation of answers.

Mixed logistic regression to predict memory using representation distance - Answer

The same analysis was conducted to examine whether convergence during the Answer phase is associated with memory performance.

# Mixed logistic regression: recall (Memory, binary) predicted by answer-phase
# distance-to-centroid in HPC, PHC, and PRC simultaneously, with a random
# intercept per subject.
a.rois.dist_mem <- glmer(Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (1|subj), family = binomial, data=dist_raw)
summary(a.rois.dist_mem)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c +  
##     (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4171.5   4201.9  -2080.8   4161.5     3183 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3717 -0.9069  0.4561  0.8867  2.2814 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.414    0.6434  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                           Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                0.84537    0.22193   3.809 0.000139 ***
## A_HPC_p2c                  0.09839    0.17876   0.550 0.582052    
## A_PHC_mask_MNI152_bin_p2c -0.43369    0.12834  -3.379 0.000727 ***
## A_PRC_mask_MNI152_bin_p2c -0.59203    0.13493  -4.388 1.14e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) A_HPC_ A_PHC_
## A_HPC_p2c   -0.464              
## A_PHC__MNI1 -0.214 -0.290       
## A_PRC__MNI1 -0.362 -0.143 -0.132
#Controlling for univariate activation of PHC and PRC
# Same model plus mean-activation covariates for PHC and PRC; bobyqa optimizer
# with a raised evaluation cap (2e5) to aid convergence.
a.rois.dist_act_mem <- glmer(Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act + (1|subj), family = binomial, data=dist_raw,control=glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=2e5)))
summary(a.rois.dist_act_mem)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c +  
##     A_PHC_mask_MNI152_bin_mean_act + A_PRC_mask_MNI152_bin_mean_act +  
##     (1 | subj)
##    Data: dist_raw
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   4172.1   4214.6  -2079.1   4158.1     3181 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3058 -0.9012  0.4610  0.8821  2.2311 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4159   0.6449  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                                 Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     0.709142   0.239086   2.966  0.00302 ** 
## A_HPC_p2c                       0.146181   0.182650   0.800  0.42352    
## A_PHC_mask_MNI152_bin_p2c      -0.379389   0.132417  -2.865  0.00417 ** 
## A_PRC_mask_MNI152_bin_p2c      -0.558026   0.136856  -4.077 4.55e-05 ***
## A_PHC_mask_MNI152_bin_mean_act -0.008518   0.055934  -0.152  0.87896    
## A_PRC_mask_MNI152_bin_mean_act  0.115889   0.070194   1.651  0.09874 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##                  (Intr) A_HPC_ A_PHC__MNI152__2 A_PRC__MNI152__2
## A_HPC_p2c        -0.495                                         
## A_PHC__MNI152__2 -0.281 -0.229                                  
## A_PRC__MNI152__2 -0.392 -0.106 -0.087                           
## A_PHC__MNI152___ -0.179  0.133  0.086            0.085          
## A_PRC__MNI152___ -0.182  0.062  0.150            0.075          
##                  A_PHC__MNI152___
## A_HPC_p2c                        
## A_PHC__MNI152__2                 
## A_PRC__MNI152__2                 
## A_PHC__MNI152___                 
## A_PRC__MNI152___ -0.520
# Interaction with curiosity
# Add distance-by-curiosity interaction terms for PHC and PRC (main effects of
# distance retained; curiosity enters only through the interactions).
a.rois.dist_mem2 <- glmer(Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow + (1|subj), family = binomial, data=dist_raw)
summary(a.rois.dist_mem2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c +  
##     (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow +  
##     (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   3994.9   4037.3  -1990.4   3980.9     3181 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.9461 -0.8660  0.3633  0.8479  3.7911 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.4782   0.6915  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                                       Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                             0.8879     0.2319   3.829 0.000129 ***
## A_HPC_p2c                               0.1376     0.1837   0.749 0.453859    
## A_PHC_mask_MNI152_bin_p2c              -0.8605     0.1700  -5.062 4.16e-07 ***
## A_PRC_mask_MNI152_bin_p2c              -0.8491     0.1714  -4.953 7.31e-07 ***
## A_PHC_mask_MNI152_bin_p2c:curHighLow1   0.6997     0.2067   3.385 0.000711 ***
## A_PRC_mask_MNI152_bin_p2c:curHighLow1   0.4779     0.2010   2.378 0.017409 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##                   (Intr) A_HPC_ A_PHC_m_MNI152__2 A_PRC_m_MNI152__2
## A_HPC_p2c         -0.451                                           
## A_PHC_m_MNI152__2 -0.172 -0.237                                    
## A_PRC_m_MNI152__2 -0.295 -0.118 -0.386                             
## A_PHC__MNI152__2:  0.008  0.014 -0.623             0.512           
## A_PRC__MNI152__2:  0.007 -0.005  0.541            -0.580           
##                   A_PHC__MNI152__2:
## A_HPC_p2c                          
## A_PHC_m_MNI152__2                  
## A_PRC_m_MNI152__2                  
## A_PHC__MNI152__2:                  
## A_PRC__MNI152__2: -0.903
# Likelihood-ratio test: do the curiosity interactions improve model fit?
anova(a.rois.dist_mem,a.rois.dist_mem2)
## Data: dist_raw
## Models:
## a.rois.dist_mem: Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (1 | subj)
## a.rois.dist_mem2: Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow + (1 | subj)
##                  npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)    
## a.rois.dist_mem     5 4171.5 4201.9 -2080.8   4161.5                         
## a.rois.dist_mem2    7 3994.9 4037.3 -1990.4   3980.9 180.67  2  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Estimated marginal slope of memory on PHC distance at each curiosity level
emSlope_phc <- emtrends(a.rois.dist_mem2, "curHighLow",var="A_PHC_mask_MNI152_bin_p2c")
emSlope_phc
##  curHighLow A_PHC_mask_MNI152_bin_p2c.trend    SE  df asymp.LCL asymp.UCL
##  0                                   -0.861 0.170 Inf    -1.194    -0.527
##  1                                   -0.161 0.167 Inf    -0.488     0.166
## 
## Confidence level used: 0.95
# Pairwise contrast of the two slopes (low vs. high curiosity)
pairs(emSlope_phc)
##  contrast estimate    SE  df z.ratio p.value
##  0 - 1        -0.7 0.207 Inf -3.385  0.0007
# Same for the PRC distance slope
emSlope_prc <- emtrends(a.rois.dist_mem2, "curHighLow",var="A_PRC_mask_MNI152_bin_p2c")
emSlope_prc
##  curHighLow A_PRC_mask_MNI152_bin_p2c.trend    SE  df asymp.LCL asymp.UCL
##  0                                   -0.849 0.171 Inf     -1.19   -0.5131
##  1                                   -0.371 0.173 Inf     -0.71   -0.0328
## 
## Confidence level used: 0.95
pairs(emSlope_prc)
##  contrast estimate    SE  df z.ratio p.value
##  0 - 1      -0.478 0.201 Inf -2.378  0.0174
# Random slope model
# Add a per-subject random slope for PHC distance; compared against the
# random-intercept model by likelihood-ratio test below.
a.rois.dist_mem3 <-  glmer(Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow + (1 + A_PHC_mask_MNI152_bin_p2c|subj), family = binomial, data=dist_raw,control=glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=5000)))
anova(a.rois.dist_mem2,a.rois.dist_mem3)
## Data: dist_raw
## Models:
## a.rois.dist_mem2: Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow + (1 | subj)
## a.rois.dist_mem3: Memory ~ A_HPC_p2c + A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c + (A_PHC_mask_MNI152_bin_p2c + A_PRC_mask_MNI152_bin_p2c):curHighLow + (1 + A_PHC_mask_MNI152_bin_p2c | subj)
##                  npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## a.rois.dist_mem2    7 3994.9 4037.3 -1990.4   3980.9                     
## a.rois.dist_mem3    9 3994.6 4049.2 -1988.3   3976.6 4.3068  2     0.1161

During the answer phase, PRC and PHC were significant predictors and remained significant after controlling for univariate activation. There was a significant interaction of both parameters with curiosity. In both PRC and PHC, this was driven by a steeper slope during low-curiosity trials than during high-curiosity trials. The inclusion of a PRC random slope led to a singular fit, and the inclusion of a PHC random slope did not lead to a better fit.

Plot parameter estimates without interaction (Answer)

# Extract the three ROI distance coefficients (log-odds scale), their odds
# ratios, and standard errors from the no-interaction memory model.
betas <- fixef(a.rois.dist_mem)[2:4]
odds <- exp(betas)
se <- sqrt(diag(vcov(a.rois.dist_mem)))[2:4]
# Delta-method SE of the odds ratio: SE(exp(b)) = exp(b) * SE(b)
# = sqrt(odds^2 * se^2). The previous sqrt(odds^2 * se) multiplied the
# squared odds by an un-squared SE. (oddse is not used by the plots below,
# so downstream output is unaffected.)
oddse <- odds * se
pdat <- data.frame(betas)
pdat$odds <- odds
pdat$se <- se
pdat$oddse <- oddse

# Display labels for the coefficients (note: `rownames` shadows
# base::rownames() here; kept as-is for compatibility with the notebook)
rownames<-c(
            "HPC\nDistance\n",
            "PHC\nDistance\n",
            "PRC\nDistance\n")

# Factor orderings used by the horizontal and vertical plot variants
sortedrownames<-c( "HPC\nDistance\n",
                   "PHC\nDistance\n",
                   "PRC\nDistance\n")

sortedrownames2<-c( 
                    "PRC\nDistance\n",
                    "PHC\nDistance\n",
                    "HPC\nDistance\n")


pdat$labels<-rownames
pdat$labels <-factor(pdat$labels,levels=sortedrownames)

## Beta on X-axis: point estimates with +/- 1 SE bars and a dashed zero line.
ggplot(pdat, aes(x = betas, y = labels)) +
  geom_point(
    data = pdat, aes(x = betas, y = labels),
    size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE
  ) +
  geom_errorbar(
    data = pdat, aes(y = labels, xmin = betas - se, xmax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_vline(xintercept = 0, linetype = "dashed") +
  labs(y = "", x = "Parameter Estimate") +
  coord_cartesian(xlim = c(-0.9, 0.8)) +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE1.eps")
  #ggsave(fname, device = cairo_ps)
}

# Reorder labels for the vertical layout, then draw betas on the y-axis.
pdat$labels <- factor(pdat$labels, levels = sortedrownames2)
## Beta on Y-axis
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_point(
    data = pdat, aes(x = labels, y = betas),
    size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE
  ) +
  geom_errorbar(
    data = pdat, aes(x = labels, ymin = betas - se, ymax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(x = "", y = "Parameter Estimate") +
  coord_cartesian(ylim = c(-0.9, 0.8)) +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE2.eps")
  #ggsave(fname, device = cairo_ps)
}

## Beta on Y-axis (bar graphs): same estimates as above, drawn as bars.
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_bar(
    data = pdat, aes(x = labels, y = betas),
    alpha = 1, show.legend = FALSE, stat = "identity",
    width = .6, color = "black"
  ) +
  geom_errorbar(
    data = pdat, aes(x = labels, ymin = betas - se, ymax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_hline(yintercept = 0, linetype = "solid") +
  labs(x = "", y = "Parameter Estimate") +
  coord_cartesian(ylim = c(-0.9, 0.8)) +
  #annotate(x=1,y= -.9, geom="text",label="**",size=10) +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE_bar.eps")
  #ggsave(fname, device = cairo_ps)
}

## Separate PE plot for each ROI
for (i in seq_len(nrow(pdat))) {
  pdat_t = pdat[i,]
  # Build the plot and keep a handle. Inside a for loop a bare ggplot()
  # expression is neither auto-printed nor registered as the "last plot",
  # so the original ggsave(fname) silently saved a stale figure. Passing
  # the plot object explicitly fixes the save.
  p <- ggplot(pdat_t,aes(x=labels,y=betas)) +
    geom_bar(data=pdat_t, aes(x=labels,y=betas),alpha=1,show.legend=F, stat="identity", width=1, color="black") + 
    geom_errorbar(data=pdat_t,aes(x=labels, ymin=betas-se,ymax=betas+se), width=0,lwd=1,alpha=erbar_alpha) + 
    geom_hline(yintercept=0,linetype="solid")  +
    labs(x="", y="Parameter Estimate") + 
    #annotate(x=1,y=.2,geom="text",label="*",size=10) +
    coord_cartesian(ylim=c(-0.9,0.5),xlim=c(.5,1.5)) +
    theme_classic(base_size=theme_bs) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),axis.ticks.x=element_blank(), axis.text.x=element_blank())
  # First three characters of the label ("HPC"/"PHC"/"PRC") tag the filename
  wroi <- substr(pdat_t$labels,1,3)
  if (figsave == 1){
    fname <-paste(figfldr,wroi , "_Dist_A_Mem_PE_", plotbar_width, ".pdf",sep="")
    ggsave(fname, plot = p, width=plotbar_width,height=4.5)
  }
}

Slopes for each ROI on predicted memory outcome (Answer)

# Prepare fitted recall probabilities for the per-ROI slope plots
wroi <- c("A_HPC_p2c", "A_PHC_mask_MNI152_bin_p2c", "A_PRC_mask_MNI152_bin_p2c")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
# Fitted probabilities from the no-interaction memory model
tmp_dat$FittedMem <- fitted(a.rois.dist_mem)
# Demean within subject so the plots show within-subject change in probability
tmp_dat$FittedMem_dm <- tmp_dat$FittedMem - ave(tmp_dat$FittedMem,tmp_dat$subj,FUN=mean)

## HPC
# Within-subject z-score of HPC distance, plotted against the demeaned fitted
# recall probability: gray = per-subject OLS fits, black = overall fit.
tmp_dat$HPC_p2cZ <- ave(tmp_dat$A_HPC_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = HPC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = HPC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_aHPC-Mem.pdf",sep="")
  #ggsave(fname, device=cairo_ps)
  ggsave(fname, device=cairo_pdf)
}

## PHC
# Same slope plot for PHC distance (z-scored within subject)
tmp_dat$PHC_p2cZ <- ave(tmp_dat$A_PHC_mask_MNI152_bin_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PHC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PHC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_aPHC-Mem.pdf",sep="")
  #ggsave(fname, device=cairo_ps)
  ggsave(fname, device=cairo_pdf)
}

## PRC
# Same slope plot for PRC distance (z-scored within subject)
tmp_dat$PRC_p2cZ <- ave(tmp_dat$A_PRC_mask_MNI152_bin_p2c,tmp_dat$subj,FUN=scale)
ggplot() + 
   geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PRC_p2cZ, y = FittedMem_dm, group = subj), colour = "lightgray",size=1, alpha=0.4, se=FALSE,show.legend=F) + 
    geom_line(data=tmp_dat,stat="smooth",method="lm", aes(x = PRC_p2cZ, y = FittedMem_dm), color='black',size=1.8, alpha=.8, se=FALSE) +
  coord_cartesian(ylim=c(-.15,.15)) +
  labs(y="Δ Probability of Recall", x="Distance from centroid (z)") +
  theme_classic(base_size=theme_bs) + 
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1){
  fname <-paste(figfldr, "fMRI_Dist_aPRC-Mem.pdf",sep="")
  #ggsave(fname, device=cairo_ps)
  ggsave(fname, device=cairo_pdf)
}

Plot parameter estimates WITH interaction (Answer)

# Extract the five coefficients (3 main effects + 2 curiosity interactions,
# log-odds scale), odds ratios, and standard errors from the interaction model.
betas <- fixef(a.rois.dist_mem2)[2:6]
odds <- exp(betas)
se <- sqrt(diag(vcov(a.rois.dist_mem2)))[2:6]
# Delta-method SE of the odds ratio: SE(exp(b)) = exp(b) * SE(b)
# = sqrt(odds^2 * se^2). The previous sqrt(odds^2 * se) omitted the square
# on the SE. (oddse is not used by the plots below.)
oddse <- odds * se
pdat <- data.frame(betas)
pdat$odds <- odds
pdat$se <- se
pdat$oddse <- oddse

# Display labels for the coefficients (note: `rownames` shadows
# base::rownames() here; kept as-is for compatibility with the notebook)
rownames<-c(
            "HPC\nDistance\n",
            "PHC\nDistance\n",
            "PRC\nDistance\n",
            "PHC\nx\nCuriosity\n",
            "PRC\nx\nCuriosity\n")

# Factor orderings used by the horizontal and vertical plot variants
sortedrownames<-c( "PHC\nx\nCuriosity\n",
                   "PRC\nx\nCuriosity\n",
                   "HPC\nDistance\n",
                   "PHC\nDistance\n",
                   "PRC\nDistance\n")

sortedrownames2<-c( 
                    "PRC\nDistance\n",
                    "PHC\nDistance\n",
                    "HPC\nDistance\n",
                    "PRC\nx\nCuriosity\n",
                    "PHC\nx\nCuriosity\n")

pdat$labels<-rownames
pdat$labels <-factor(pdat$labels,levels=sortedrownames)

## Beta on X-axis: point estimates with +/- 1 SE bars and a dashed zero line.
ggplot(pdat, aes(x = betas, y = labels)) +
  geom_point(
    data = pdat, aes(x = betas, y = labels),
    size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE
  ) +
  geom_errorbar(
    data = pdat, aes(y = labels, xmin = betas - se, xmax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_vline(xintercept = 0, linetype = "dashed") +
  labs(y = "", x = "Parameter Estimate") +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE1.eps")
  #ggsave(fname, device = cairo_ps)
}

# Reorder labels for the vertical layout, then draw betas on the y-axis.
pdat$labels <- factor(pdat$labels, levels = sortedrownames2)
## Beta on Y-axis
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_point(
    data = pdat, aes(x = labels, y = betas),
    size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE
  ) +
  geom_errorbar(
    data = pdat, aes(x = labels, ymin = betas - se, ymax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(x = "", y = "Parameter Estimate") +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE2.eps")
  #ggsave(fname, device = cairo_ps)
}

## Beta on Y-axis (bar graphs): same estimates as above, drawn as bars.
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_bar(
    data = pdat, aes(x = labels, y = betas),
    alpha = 1, show.legend = FALSE, stat = "identity",
    width = .6, color = "black"
  ) +
  geom_errorbar(
    data = pdat, aes(x = labels, ymin = betas - se, ymax = betas + se),
    width = 0, lwd = 1, alpha = erbar_alpha
  ) +
  geom_hline(yintercept = 0, linetype = "solid") +
  labs(x = "", y = "Parameter Estimate") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

if (figsave == 1) {
  # Export disabled in the source notebook; retained for reference.
  #fname <- paste0(figfldr, "fMRI_Dist_A_Mem_PE_bar.eps")
  #ggsave(fname, device = cairo_ps)
}

Distance Analysis with VTA

To examine the relationship between VTA activation and representational distance, trial-level univariate VTA activation was used as a predictor of trial-level representational distance.

## HPC Distance
# Predict question-phase HPC distance-to-centroid from trial-wise VTA
# activation (column name suggests a 50%-threshold VTA mask excluding SN —
# TODO confirm against the ROI definitions).
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"

# Random-intercept model; ML fit (REML=F) for later likelihood comparisons
q.hpc.dist_vta <- lmer(Dist ~ VTA_Act + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_vta)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ VTA_Act + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1744.5  -1720.3    876.3  -1752.5     3184 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4189 -0.7206 -0.0718  0.6546  3.5821 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.00392  0.06261 
##  Residual             0.03310  0.18194 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.345e-01  1.346e-02  2.307e+01   61.99   <2e-16 ***
## VTA_Act     -5.836e-02  3.579e-03  3.176e+03  -16.31   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr)
## VTA_Act -0.038
# Add a per-subject random slope for VTA activation (REML fit; bobyqa
# optimizer with a 5000-evaluation cap)
q.hpc.dist_vta_rand <- lmer(Dist ~ VTA_Act + (1 + VTA_Act|subj),data = tmp_dat, control=lmerControl(optimizer="bobyqa",optCtrl=list(maxfun=5000)))
summary(q.hpc.dist_vta_rand)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: Dist ~ VTA_Act + (1 + VTA_Act | subj)
##    Data: tmp_dat
## Control: lmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 5000))
## 
## REML criterion at convergence: -1909.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.51335 -0.70303 -0.07689  0.65645  3.15350 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr
##  subj     (Intercept) 0.003431 0.05857      
##           VTA_Act     0.002835 0.05325  0.11
##  Residual             0.030874 0.17571      
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)  0.83820    0.01263 21.94725  66.374  < 2e-16 ***
## VTA_Act     -0.05866    0.01168 22.06428  -5.021 4.96e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr)
## VTA_Act 0.089
# LRT: random-intercept vs. random-slope model (anova() refits the REML
# model with ML before comparing, as echoed below)
anova(q.hpc.dist_vta,q.hpc.dist_vta_rand)
## refitting model(s) with ML (instead of REML)
## Data: tmp_dat
## Models:
## q.hpc.dist_vta: Dist ~ VTA_Act + (1 | subj)
## q.hpc.dist_vta_rand: Dist ~ VTA_Act + (1 + VTA_Act | subj)
##                     npar     AIC     BIC logLik deviance  Chisq Df Pr(>Chisq)
## q.hpc.dist_vta         4 -1744.5 -1720.3 876.27  -1752.5                     
## q.hpc.dist_vta_rand    6 -1911.3 -1874.9 961.63  -1923.3 170.72  2  < 2.2e-16
##                        
## q.hpc.dist_vta         
## q.hpc.dist_vta_rand ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Controlling for univariate activation in HPC
# Does VTA activation still predict HPC distance once HPC mean activation
# (Act) is included as a covariate? ML fit for the LRT below.
q.hpc.dist_vta_hpc <- lmer(Dist ~ VTA_Act + Act + (1 + VTA_Act|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_vta_hpc)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ VTA_Act + Act + (1 + VTA_Act | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1923.2  -1880.7    968.6  -1937.2     3181 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4775 -0.7104 -0.0750  0.6568  3.2903 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr
##  subj     (Intercept) 0.003106 0.05574      
##           VTA_Act     0.002559 0.05059  0.14
##  Residual             0.030761 0.17539      
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.410e-01  1.208e-02  2.307e+01  69.625  < 2e-16 ***
## VTA_Act     -4.981e-02  1.140e-02  2.506e+01  -4.370 0.000190 ***
## Act         -1.773e-02  4.732e-03  3.180e+03  -3.747 0.000182 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr) VTA_Ac
## VTA_Act  0.129       
## Act     -0.063 -0.206
# LRT: does adding HPC activation improve on the VTA-only model?
anova(q.hpc.dist_vta_rand,q.hpc.dist_vta_hpc)
## refitting model(s) with ML (instead of REML)
## Data: tmp_dat
## Models:
## q.hpc.dist_vta_rand: Dist ~ VTA_Act + (1 + VTA_Act | subj)
## q.hpc.dist_vta_hpc: Dist ~ VTA_Act + Act + (1 + VTA_Act | subj)
##                     npar     AIC     BIC logLik deviance  Chisq Df Pr(>Chisq)
## q.hpc.dist_vta_rand    6 -1911.3 -1874.9 961.63  -1923.3                     
## q.hpc.dist_vta_hpc     7 -1923.2 -1880.7 968.60  -1937.2 13.933  1  0.0001894
##                        
## q.hpc.dist_vta_rand    
## q.hpc.dist_vta_hpc  ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Controlling for Curiosity state
# VTA x curiosity interaction term; the LRT tests whether curiosity
# modulates the VTA slope beyond the VTA-only model
q.hpc.dist_vta_cur <- lmer(Dist ~ VTA_Act + VTA_Act:curHighLow + (1 + VTA_Act|subj),data = tmp_dat, REML=F)
anova(q.hpc.dist_vta_rand,q.hpc.dist_vta_cur)
## refitting model(s) with ML (instead of REML)
## Data: tmp_dat
## Models:
## q.hpc.dist_vta_rand: Dist ~ VTA_Act + (1 + VTA_Act | subj)
## q.hpc.dist_vta_cur: Dist ~ VTA_Act + VTA_Act:curHighLow + (1 + VTA_Act | subj)
##                     npar     AIC     BIC logLik deviance  Chisq Df Pr(>Chisq)
## q.hpc.dist_vta_rand    6 -1911.3 -1874.9 961.63  -1923.3                     
## q.hpc.dist_vta_cur     7 -1909.7 -1867.2 961.85  -1923.7 0.4393  1     0.5075
## VTA-HPC relation using only Low curiosity trials
# Subset to low-curiosity trials (curHighLow == 0) and refit the
# random-intercept model on that subset
tmp_dat2 <-subset(tmp_dat,curHighLow==0)
q.hpc.dist_vta_lc <- lmer(Dist ~ VTA_Act + (1 |subj),data = tmp_dat2, REML=F)
summary(q.hpc.dist_vta_lc)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ VTA_Act + (1 | subj)
##    Data: tmp_dat2
## 
##      AIC      BIC   logLik deviance df.resid 
##   -819.5   -798.1    413.8   -827.5     1585 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4062 -0.7195 -0.0483  0.6561  3.5393 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.004403 0.06636 
##  Residual             0.033642 0.18342 
## Number of obs: 1589, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.406e-01  1.460e-02  2.299e+01   57.58   <2e-16 ***
## VTA_Act     -6.093e-02  5.101e-03  1.577e+03  -11.94   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr)
## VTA_Act -0.034
# Random-slope version; bobyqa optimizer with a raised iteration cap,
# presumably to aid convergence -- note this fit uses the default REML
# (no REML=F), unlike the other models in this section
q.hpc.dist_vta_rand_lc <- lmer(Dist ~ VTA_Act + (1 + VTA_Act |subj),data = tmp_dat2, control=lmerControl(optimizer="bobyqa",optCtrl=list(maxfun=5000)))
summary(q.hpc.dist_vta_rand_lc)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: Dist ~ VTA_Act + (1 + VTA_Act | subj)
##    Data: tmp_dat2
## Control: lmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 5000))
## 
## REML criterion at convergence: -863.8
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.49924 -0.71054 -0.05978  0.65139  3.15825 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr
##  subj     (Intercept) 0.003870 0.06221      
##           VTA_Act     0.002304 0.04801  0.11
##  Residual             0.031930 0.17869      
## Number of obs: 1589, groups:  subj, 23
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)  0.84216    0.01377 21.77388  61.179  < 2e-16 ***
## VTA_Act     -0.06198    0.01127 21.68481  -5.498 1.67e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr)
## VTA_Act 0.077
# LRT (anova refits with ML): random VTA slope also improves fit in low-curiosity data
anova(q.hpc.dist_vta_lc,q.hpc.dist_vta_rand_lc)
## refitting model(s) with ML (instead of REML)
## Data: tmp_dat2
## Models:
## q.hpc.dist_vta_lc: Dist ~ VTA_Act + (1 | subj)
## q.hpc.dist_vta_rand_lc: Dist ~ VTA_Act + (1 + VTA_Act | subj)
##                        npar     AIC     BIC logLik deviance  Chisq Df
## q.hpc.dist_vta_lc         4 -819.54 -798.06 413.77  -827.54          
## q.hpc.dist_vta_rand_lc    6 -865.72 -833.50 438.86  -877.72 50.177  2
##                        Pr(>Chisq)    
## q.hpc.dist_vta_lc                    
## q.hpc.dist_vta_rand_lc  1.271e-11 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

VTA was a significant predictor of representation distance in the HPC. This relationship remained significant after controlling for univariate HPC activation and for curiosity states. The inclusion of a VTA random slope resulted in a better model fit, so all models included VTA as a random slope. Note that when both VTA and Curiosity were included as regressors, only VTA was a significant predictor.

Plot parameter estimate of VTA

# Fixed effect of VTA and its standard error from the random-slope model
betas<-fixef(q.hpc.dist_vta_rand)[2]
# NOTE(review): exp(beta) of a Gaussian (identity-link) model is not an odds
# ratio; odds/oddse are carried along in pdat but not used by the plots below.
odds<-exp(betas)
se<-sqrt(diag(vcov(q.hpc.dist_vta_rand)))[2]
# Delta-method SE of exp(beta) is exp(beta) * SE(beta).
# (Previously sqrt(odds^2 * se), which mixed a standard error with a variance.)
oddse<-odds * se
pdat<-data.frame(betas)
pdat$odds<-odds
pdat$se<-se
pdat$oddse<-oddse

# Axis label for the single plotted estimate
# (renamed from `rownames`, which shadowed base::rownames)
plot_labels<-c("VTA\nActivation\n")
pdat$labels<-plot_labels


## Beta on X-axis: point estimate with +/- 1 SE error bar
ggplot(pdat, aes(x = betas, y = labels)) +
  geom_point(size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE) +
  geom_errorbar(aes(xmin = betas - se, xmax = betas + se),
                width = 0, lwd = 1, alpha = erbar_alpha) +
  geom_vline(xintercept = 0, linetype = "dashed") +
  labs(y = "", x = "Parameter Estimate") +
  coord_cartesian(xlim = c(-0.09, 0.05)) +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Save to the figure folder as PDF
  fname <- paste0(figfldr, "fMRI_VTA_Q_HPC_Dist_PE1.pdf")
  ggsave(fname)
}

## Beta on Y-axis: same estimate with the axes transposed
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_point(size = mean_dotsize, shape = 20, alpha = 1, show.legend = FALSE) +
  geom_errorbar(aes(ymin = betas - se, ymax = betas + se),
                width = 0, lwd = 1, alpha = erbar_alpha) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(x = "", y = "Parameter Estimate") +
  coord_cartesian(ylim = c(-0.09, 0.05)) +
  theme_bw(base_size = theme_bs)

if (figsave == 1) {
  # Save to the figure folder as PDF
  fname <- paste0(figfldr, "fMRI_VTA_Q_HPC_Dist_PE2.pdf")
  ggsave(fname)
}

## Beta on Y-axis (bar graphs)
# geom_col() is the shorthand for geom_bar(stat = "identity")
ggplot(pdat, aes(x = labels, y = betas)) +
  geom_col(width = 0.6, color = "black", alpha = 1, show.legend = FALSE) +
  geom_errorbar(aes(ymin = betas - se, ymax = betas + se),
                width = 0, lwd = 1, alpha = erbar_alpha) +
  geom_hline(yintercept = 0, linetype = "solid") +
  labs(x = "", y = "Parameter Estimate") +
  coord_cartesian(ylim = c(-0.09, 0.05)) +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        axis.ticks.x = element_blank(), axis.text.x = element_blank())

if (figsave == 1) {
  # Filename encodes the bar width used for the saved figure
  fname <- paste0(figfldr, "fMRI_VTA_Q_HPC_Dist_PE_bar_", plotbar_width, ".pdf")
  ggsave(fname, width = plotbar_width, height = 4.5)
}

Plot relationship of VTA activation and HPC distance

## Plot VTA-HPC distance (raw)
## Combine across Curiosity states (use data from q.hpc.dist_vta_hpc_rand_cur)
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"
# Within-subject z-scores of each measure
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)
tmp_dat$ActZ <- ave(tmp_dat$Act,tmp_dat$subj,FUN=scale)
# Fixed: grouping column was tmp_dat$sub, which only resolved via `$`
# partial matching against "subj"
tmp_dat$VTA_ActZ <- ave(tmp_dat$VTA_Act,tmp_dat$subj,FUN=scale)
# Model-fitted distances (random-slope model), z-scored within subject
tmp_dat$FittedDist <- predict(q.hpc.dist_vta_rand)
tmp_dat$FittedDistZ <- ave(tmp_dat$FittedDist, tmp_dat$subj,FUN=scale)

# Per-subject fitted slopes (light gray) with the group-level fit (black) overlaid
ggplot(tmp_dat, aes(x = VTA_Act, y = FittedDist)) +
  geom_line(aes(group = subj), stat = "smooth", method = "lm",
            colour = "lightgray", size = 1, alpha = 0.4, se = FALSE,
            show.legend = FALSE) +
  geom_line(stat = "smooth", method = "lm",
            color = "black", size = 1.8, alpha = 0.8, se = FALSE) +
  labs(y = "HPC\nDistance from centroid", x = "Anticipatory VTA BOLD (a.u)") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_VTA-qHPC_raw.pdf")
  ggsave(fname)
}

## With raw scatter (mean slope)
# Trial-level scatter with the group-level fitted slope overlaid
ggplot() +
  geom_point(data = dist_raw,
             aes(x = Q_VTA_50_noSN_bin_mean_act, y = Q_HPC_p2c),
             alpha = 0.05, show.legend = FALSE) +
  geom_line(data = tmp_dat, stat = "smooth", method = "lm",
            aes(x = VTA_Act, y = FittedDist),
            color = "black", size = 1.8, alpha = 0.8, se = FALSE) +
  labs(y = "HPC\nDistance from centroid", x = "Anticipatory VTA BOLD (a.u)") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_VTA-qHPC_raw_meanslope_scatter.pdf")
  ggsave(fname)
}

## With raw scatter (individual slope)
# Trial-level scatter with one fitted slope per subject
ggplot() +
  geom_point(data = dist_raw,
             aes(x = Q_VTA_50_noSN_bin_mean_act, y = Q_HPC_p2c),
             alpha = 0.05, show.legend = FALSE) +
  geom_line(data = tmp_dat, stat = "smooth", method = "lm",
            aes(x = VTA_Act, y = FittedDist, group = subj),
            colour = "black", size = 1, alpha = 0.8, se = FALSE,
            show.legend = FALSE) +
  labs(y = "HPC\nDistance from centroid", x = "Anticipatory VTA BOLD (a.u)") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_VTA-qHPC_raw_indivslope_scatter.pdf")
  ggsave(fname)
}

Separate plot for each curiosity level

wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"
# Within-subject z-scores of each measure
tmp_dat$DistZ <- ave(tmp_dat$Dist,tmp_dat$subj,FUN=scale)
tmp_dat$ActZ <- ave(tmp_dat$Act,tmp_dat$subj,FUN=scale)
# Fixed: grouping column was tmp_dat$sub, which only resolved via `$`
# partial matching against "subj"
tmp_dat$VTA_ActZ <- ave(tmp_dat$VTA_Act,tmp_dat$subj,FUN=scale)
# Fitted values from the curiosity-interaction model, z-scored within
# subject separately for each curiosity state
tmp_dat$FittedDist <- predict(q.hpc.dist_vta_cur)
tmp_dat_hc <- subset(tmp_dat,tmp_dat$curHighLow==1)
tmp_dat_lc <- subset(tmp_dat,tmp_dat$curHighLow==0)
tmp_dat_hc$FittedDistZ <- ave(tmp_dat_hc$FittedDist,tmp_dat_hc$subj,FUN=scale)
tmp_dat_lc$FittedDistZ <- ave(tmp_dat_lc$FittedDist,tmp_dat_lc$subj,FUN=scale)
tmp_dat <-rbind(tmp_dat_hc,tmp_dat_lc)


# Fitted VTA-distance slopes from the interaction model, one line per
# curiosity state (0 = Low, 1 = High)
ggplot(tmp_dat, aes(x = VTA_Act, y = FittedDist, color = curHighLow)) +
  geom_line(stat = "smooth", method = "lm", size = 1.8, alpha = 0.8, se = FALSE) +
  scale_color_manual(values = c(lcolor1, hcolor1),
                     breaks = c("0", "1"), labels = c("Low", "High")) +
  guides(color = guide_legend(reverse = TRUE)) +
  coord_cartesian(ylim = c(0.25, 1.25)) +
  labs(y = "Predicted Distance", x = "Anticipatory VTA BOLD (a.u)",
       color = "Curiosity") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1) {
  fname <- paste0(figfldr, "fMRI_VTA-qHPC_Cur_raw.pdf")
  ggsave(fname)
}

VTA Activation and HPC representation on memory

To examine whether VTA and HPC contributed unique variance, a hierarchical regression was performed to test whether inclusion of each factor resulted in better model fit.

wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"

# Individual models
# m1: memory ~ VTA; m2: mediator model (distance ~ VTA);
# m3: memory ~ VTA + distance; m4: memory ~ distance
m1 <- glmer(Memory ~ VTA_Act + (1|subj), family = binomial, data=tmp_dat)
# Gaussian outcome: fit with lme4::lmer directly. glmer() with the default
# gaussian family is deprecated and merely forwards to lmer() while emitting
# a warning; lme4::lmer returns the identical lmerMod object without it.
m2 <- lme4::lmer(Dist ~ VTA_Act + (1|subj),data = tmp_dat)
m3 <- glmer( Memory ~ VTA_Act + Dist + (1|subj),family=binomial,data=tmp_dat)
m4 <- glmer( Memory ~ Dist + (1|subj),family=binomial,data=tmp_dat)

# Model Comparison
# Does adding HPC distance improve the VTA-only memory model?
anova(m3,m1)
## Data: tmp_dat
## Models:
## m1: Memory ~ VTA_Act + (1 | subj)
## m3: Memory ~ VTA_Act + Dist + (1 | subj)
##    npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)  
## m1    3 4199.6 4217.8 -2096.8   4193.6                       
## m3    4 4196.4 4220.6 -2094.2   4188.4 5.1977  1    0.02262 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Does adding VTA improve the distance-only memory model?
anova(m3,m4)
## Data: tmp_dat
## Models:
## m4: Memory ~ Dist + (1 | subj)
## m3: Memory ~ VTA_Act + Dist + (1 | subj)
##    npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)  
## m4    3 4197.2 4215.4 -2095.6   4191.2                       
## m3    4 4196.4 4220.6 -2094.2   4188.4 2.8056  1    0.09394 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Mediation
# HPC distance (Dist) mediating the VTA -> Memory effect;
# quasi-Bayesian CIs (boot=F) with 1000 simulations
results <- mediate(m2,m3,treat="VTA_Act",mediator="Dist",boot=F,sims=1000)
summary(results)
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
## Mediator Groups: subj 
## 
## Outcome Groups: subj 
## 
## Output Based on Overall Averages Across Groups 
## 
##                          Estimate 95% CI Lower 95% CI Upper p-value   
## ACME (control)            0.00623      0.00121         0.01   0.020 * 
## ACME (treated)            0.00622      0.00121         0.01   0.020 * 
## ADE (control)             0.01637     -0.00221         0.03   0.092 . 
## ADE (treated)             0.01636     -0.00221         0.03   0.092 . 
## Total Effect              0.02259      0.00511         0.04   0.010 **
## Prop. Mediated (control)  0.27660      0.04481         1.29   0.030 * 
## Prop. Mediated (treated)  0.27609      0.04467         1.29   0.030 * 
## ACME (average)            0.00622      0.00121         0.01   0.020 * 
## ADE (average)             0.01636     -0.00221         0.03   0.092 . 
## Prop. Mediated (average)  0.27634      0.04474         1.29   0.030 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Sample Size Used: 3188 
## 
## 
## Simulations: 1000
#summary(results,output="byeffect")
#plot(results)

# Test mediation with control and treatment contrasting 25th and 75th percentile for VTA activity
# (-.4 and .72 -- presumably the empirical 25th/75th percentiles of VTA_Act; verify against data)
results2<-mediate(m2,m3,treat="VTA_Act",mediator="Dist",control.value=-.4,treat.value=.72,boot=F,sims=1000)
summary(results2)
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
## Mediator Groups: subj 
## 
## Outcome Groups: subj 
## 
## Output Based on Overall Averages Across Groups 
## 
##                           Estimate 95% CI Lower 95% CI Upper p-value  
## ACME (control)            0.006713     0.000734         0.01   0.028 *
## ACME (treated)            0.006703     0.000732         0.01   0.028 *
## ADE (control)             0.018401    -0.004900         0.04   0.124  
## ADE (treated)             0.018392    -0.004910         0.04   0.124  
## Total Effect              0.025104     0.002734         0.05   0.028 *
## Prop. Mediated (control)  0.257394    -0.020477         1.49   0.056 .
## Prop. Mediated (treated)  0.257512    -0.020478         1.49   0.056 .
## ACME (average)            0.006708     0.000732         0.01   0.028 *
## ADE (average)             0.018396    -0.004905         0.04   0.124  
## Prop. Mediated (average)  0.257453    -0.020477         1.49   0.056 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Sample Size Used: 3188 
## 
## 
## Simulations: 1000

Individual difference analysis

## Compute individual behavioral performance
# Per-subject memory by curiosity condition, reshaped wide (spread yields
# columns "0"/"1", renamed Low/High)
tmp_beh <- beh_avg2[c("subj","curHighLow","mem")]
tmp_beh2 <- spread(tmp_beh,curHighLow,mem)
colnames(tmp_beh2)[2:3] <- c("Low","High")
# Overall performance and the high-minus-low curiosity memory benefit
tmp_beh2$Avg <- (tmp_beh2$Low + tmp_beh2$High)/2
tmp_beh2$Diff <- (tmp_beh2$High - tmp_beh2$Low)

## Brain Measures
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Memory",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"

## VTA activation
# Subject x condition means, reshaped wide, plus the across-condition
# average and the High - Low difference
avg_vta <- aggregate(VTA_Act~subj*curHighLow,tmp_dat,mean)
colnames(avg_vta)[3]<-"VTA"
avg_vta <- spread(avg_vta,curHighLow,VTA)
colnames(avg_vta)[2:3] <- c("VTA_Low","VTA_High")
avg_vta$VTA_Avg <- (avg_vta$VTA_High + avg_vta$VTA_Low) / 2
avg_vta$VTA_Diff <- (avg_vta$VTA_High - avg_vta$VTA_Low) #/ (avg_vta$VTA_High + avg_vta$VTA_Low)

# Join brain measures onto the behavioral summary
tmp_dat2 <- merge(tmp_beh2,avg_vta,by="subj")

## HPC Distance
# Subject x condition mean distances, reshaped wide
avg_dist <- aggregate(Dist~subj * curHighLow,tmp_dat,mean)
colnames(avg_dist)[3]<-"Dist_Mean"
avg_dist <- spread(avg_dist,curHighLow,Dist_Mean)
colnames(avg_dist)[2:3] <- c("RD_Low","RD_High")
avg_dist$RD_Avg <- (avg_dist$RD_High + avg_dist$RD_Low) / 2
# NOTE: RD_Diff is Low - High (opposite sign to VTA_Diff), so larger values
# indicate greater curiosity-related convergence (distance reduction)
avg_dist$RD_Diff <- (avg_dist$RD_Low - avg_dist$RD_High) #/ (avg_dist$RD_High + avg_dist$RD_Low)

tmp_dat2 <- merge(tmp_dat2,avg_dist,by="subj")

Relation of VTA activation with HPC stability

# Across-subject correlation: average VTA activation vs average HPC distance
cor.test(tmp_dat2$VTA_Avg,tmp_dat2$RD_Avg)
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$VTA_Avg and tmp_dat2$RD_Avg
## t = -3.1068, df = 21, p-value = 0.005339
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.7905046 -0.1937716
## sample estimates:
##        cor 
## -0.5611528
# Difference scores: VTA (High - Low) vs distance (Low - High)
cor.test(tmp_dat2$VTA_Diff,tmp_dat2$RD_Diff)
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$VTA_Diff and tmp_dat2$RD_Diff
## t = 0.25569, df = 21, p-value = 0.8007
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.3648716  0.4574078
## sample estimates:
##        cor 
## 0.05570935
# High-curiosity condition only
cor.test(tmp_dat2$VTA_High,tmp_dat2$RD_High)
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$VTA_High and tmp_dat2$RD_High
## t = -3.2323, df = 21, p-value = 0.003992
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.7988055 -0.2153553
## sample estimates:
##        cor 
## -0.5763913
# Low-curiosity condition only
cor.test(tmp_dat2$VTA_Low,tmp_dat2$RD_Low)
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$VTA_Low and tmp_dat2$RD_Low
## t = -2.2726, df = 21, p-value = 0.03367
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.72391843 -0.03928662
## sample estimates:
##        cor 
## -0.4442939
## Plots across both conditions
# Subject-level scatter of VTA activation vs HPC distance, with lm fit
ggplot(tmp_dat2, aes(x = VTA_Avg, y = RD_Avg)) +
  geom_point(size = 2.5, alpha = 0.6) +
  geom_line(stat = "smooth", method = "lm",
            color = "black", size = 1.8, alpha = 0.8, se = FALSE) +
  labs(y = "HPC\nRepresentation Distance", x = "VTA BOLD (a.u.)") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'

if (figsave == 1) {
  fname <- paste0(figfldr, "IndivCorr_VTA-HPC.pdf")
  ggsave(fname, device = cairo_pdf)
}

VTA activation was significantly correlated with HPC stability across individuals (r = -0.56). This correlation was observed for both High (r = -0.58) and Low curiosity (r = -0.44), but was not observed when correlating the difference scores (i.e., High - Low).

Relation of HPC stability with Memory performance

### Across both conditions
# Memory benefit (High - Low) vs convergence difference (Low - High)
cor.test(tmp_dat2$Diff,tmp_dat2$RD_Diff, method="pearson")
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$Diff and tmp_dat2$RD_Diff
## t = -0.21487, df = 21, p-value = 0.8319
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.4503439  0.3725588
## sample estimates:
##         cor 
## -0.04683598
# Overall memory performance vs convergence difference
cor.test(tmp_dat2$Avg,tmp_dat2$RD_Diff, method="pearson")
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$Avg and tmp_dat2$RD_Diff
## t = 2.2259, df = 21, p-value = 0.03711
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.03014062 0.71953123
## sample estimates:
##       cor 
## 0.4369146
### Separately for each condition
# High-curiosity memory vs high-curiosity distance
cor.test(tmp_dat2$High,tmp_dat2$RD_High, method="pearson")
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$High and tmp_dat2$RD_High
## t = -1.4643, df = 21, p-value = 0.1579
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.6366943  0.1232975
## sample estimates:
##        cor 
## -0.3043741
# Low-curiosity memory vs low-curiosity distance
cor.test(tmp_dat2$Low,tmp_dat2$RD_Low, method="pearson")
## 
##  Pearson's product-moment correlation
## 
## data:  tmp_dat2$Low and tmp_dat2$RD_Low
## t = 0.51154, df = 21, p-value = 0.6143
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.3157005  0.5002640
## sample estimates:
##       cor 
## 0.1109384
### Plot
# Convergence difference (Low - High) against overall memory recall
ggplot(tmp_dat2, aes(x = RD_Diff, y = Avg)) +
  geom_point(size = 2.5, alpha = 0.6) +
  geom_line(stat = "smooth", method = "lm",
            color = "black", size = 1.8, alpha = 0.8, se = FALSE) +
  labs(y = "Memory Recall", x = "HPC\nModulated Distance(Δ)") +
  theme_classic(base_size = theme_bs) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
## `geom_smooth()` using formula 'y ~ x'

Although we did not observe a significant correlation between the difference in curiosity-related hippocampal convergence (Low - High curiosity) and memory benefits (r = -.05, p = .831), we did observe a significant correlation between curiosity-related hippocampal convergence and overall memory performance (across both conditions), whereby greater curiosity-related hippocampal convergence was associated with better memory performance (r = .44, p = .037).

Other Control Analyses

Behavioral

Comparing differences in number of Recall and Forgotten trials

beh_raw2 <- beh_raw
# A trial is a miss when it was not a hit
beh_raw2$Misses <- beh_raw2$Hits==0
# Per-subject counts of recalled (hit) and forgotten (miss) trials
# (use <- for assignment, per house style)
beh_tmp <- beh_raw2 %>% group_by(subj) %>% summarise(hit_count = sum(Hits),
                                                     miss_count = sum(Misses)
)
## `summarise()` ungrouping output (override with `.groups` argument)
mean(beh_tmp$hit_count)
## [1] 71.08696
sd(beh_tmp$hit_count)
## [1] 22.75744
mean(beh_tmp$miss_count)
## [1] 67.52174
sd(beh_tmp$miss_count)
## [1] 22.0781
# Paired t-test comparing hit vs miss counts within subject
# (TRUE, not T, which is reassignable)
count_mem <- t.test(beh_tmp$hit_count, beh_tmp$miss_count, paired=TRUE)
count_mem
## 
##  Paired t-test
## 
## data:  beh_tmp$hit_count and beh_tmp$miss_count
## t = 0.39758, df = 22, p-value = 0.6948
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -15.03204  22.16247
## sample estimates:
## mean of the differences 
##                3.565217

There was no significant difference in the number of trials that were recalled vs forgotten.

Convergence Measures

wroi <- c("Q_HPC_p2c","Q_HPC_mean_act")
# Control analysis: does curiosity predict convergence beyond self-reported
# likelihood of knowing the answer (likelihoodZ)?
tmp_dat <- dist_raw[c("subj","curHighLow","likelihoodZ",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"

q.hpc.dist_cur_ctrl <- lmer(Dist ~ curHighLow + likelihoodZ + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_cur_ctrl)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + likelihoodZ + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1493.5  -1463.2    751.8  -1503.5     3183 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3780 -0.6949 -0.0753  0.6339  3.8455 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.004577 0.06765 
##  Residual             0.035771 0.18913 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.342e-01  1.504e-02  2.659e+01  55.464   <2e-16 ***
## curHighLow1 -1.631e-02  7.232e-03  3.166e+03  -2.255   0.0242 *  
## likelihoodZ -8.815e-04  4.160e-03  3.172e+03  -0.212   0.8322    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1
## curHighLow1 -0.261       
## likelihoodZ  0.142 -0.376
# Marginal means and pairwise High/Low contrast; pbkrtest.limit raised so
# Kenward-Roger df are computed despite the 3188-observation sample
emmeans(q.hpc.dist_cur_ctrl, list(pairwise ~ curHighLow), adjust = "tukey",pbkrtest.limit = 3188)
## $`emmeans of curHighLow`
##  curHighLow emmean     SE   df lower.CL upper.CL
##  0           0.834 0.0153 27.1    0.798    0.870
##  1           0.818 0.0153 27.1    0.782    0.854
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $`pairwise differences of curHighLow`
##  contrast estimate      SE   df t.ratio p.value
##  0 - 1      0.0163 0.00723 3168 2.254   0.0243 
## 
## Degrees-of-freedom method: kenward-roger

Curiosity remained a significant predictor of convergence after controlling for self-reported likelihood of knowing.

Modeling Action effects on convergence

wroi <- c("Q_HPC_p2c","Q_HPC_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","Motor",wroi)] 
colnames(tmp_dat)[4]<-"Dist"
colnames(tmp_dat)[5] <-"Act"

# Curiosity x Motor
# Full factorial: does responding (Motor) account for the curiosity effect?
q.hpc.dist_cm_ctrl <- lmer(Dist ~ curHighLow * Motor + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_cm_ctrl)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow * Motor + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1496.6  -1460.2    754.3  -1508.6     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4230 -0.7057 -0.0767  0.6375  3.8228 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.004575 0.06764 
##  Residual             0.035714 0.18898 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                      Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)         8.436e-01  1.560e-02  3.081e+01  54.071   <2e-16 ***
## curHighLow1        -2.023e-02  9.414e-03  3.165e+03  -2.148   0.0318 *  
## Motor1             -1.826e-02  9.486e-03  3.165e+03  -1.925   0.0543 .  
## curHighLow1:Motor1  6.882e-03  1.340e-02  3.165e+03   0.514   0.6075    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1 Motor1
## curHighLow1 -0.302              
## Motor1      -0.300  0.496       
## crHghLw1:M1  0.213 -0.703 -0.708

Curiosity remained a significant predictor of convergence after controlling for action.

Modeling Delay Interval

# Convert delay to factor
dist_raw$delay <-as.factor(dist_raw$delay)
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","delay",wroi)] 
colnames(tmp_dat)[4]<-"Dist"
colnames(tmp_dat)[5] <- "Act"


# Curiosity & Delay
# Additive model: curiosity effect controlling for delay interval
q.hpc.dist_cd_ctrl <- lmer(Dist ~ curHighLow + delay + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_cd_ctrl)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ curHighLow + delay + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1496.9  -1466.5    753.4  -1506.9     3183 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4097 -0.7021 -0.0728  0.6371  3.8186 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.00458  0.06767 
##  Residual             0.03573  0.18903 
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.408e-01  1.526e-02  2.817e+01  55.090   <2e-16 ***
## curHighLow1 -1.683e-02  6.697e-03  3.165e+03  -2.513    0.012 *  
## delay13     -1.232e-02  6.697e-03  3.165e+03  -1.839    0.066 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) crHgL1
## curHighLow1 -0.219       
## delay13     -0.219 -0.005

Curiosity remained a significant predictor of convergence after controlling for delay.

# Memory predicted by convergence in HPC, PHC and PRC, controlling for delay
q.rois.dist_mem_delay_ctrl <- glmer(Memory ~Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c + delay + (1|subj), family = binomial, data=dist_raw)
summary(q.rois.dist_mem_delay_ctrl)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: 
## Memory ~ Q_HPC_p2c + Q_PHC_mask_MNI152_bin_p2c + Q_PRC_mask_MNI152_bin_p2c +  
##     delay + (1 | subj)
##    Data: dist_raw
## 
##      AIC      BIC   logLik deviance df.resid 
##   4202.2   4238.6  -2095.1   4190.2     3182 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.1901 -0.8980  0.4895  0.8942  1.7960 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  subj   (Intercept) 0.3862   0.6214  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##                           Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                0.47442    0.23396   2.028  0.04258 * 
## Q_HPC_p2c                 -0.54082    0.20634  -2.621  0.00877 **
## Q_PHC_mask_MNI152_bin_p2c -0.06265    0.12218  -0.513  0.60810   
## Q_PRC_mask_MNI152_bin_p2c  0.06487    0.13011   0.499  0.61807   
## delay13                    0.05309    0.07422   0.715  0.47445   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Q_HPC_ Q_PHC_ Q_PRC_
## Q_HPC_p2c   -0.564                     
## Q_PHC__MNI1 -0.156 -0.273              
## Q_PRC__MNI1 -0.344 -0.122 -0.112       
## delay13     -0.187  0.033 -0.028  0.033

HPC convergence remained a significant predictor of memory after controlling for delay.

## HPC Distance
wroi <- c("Q_HPC_p2c","Q_HPC_mean_act","Q_VTA_50_noSN_bin_mean_act")
tmp_dat <- dist_raw[c("subj","curHighLow","delay",wroi)]
colnames(tmp_dat)[4] <- "Dist"
colnames(tmp_dat)[5] <- "Act"
colnames(tmp_dat)[6] <- "VTA_Act"

# VTA effect on convergence controlling for delay interval
q.hpc.dist_vta_delay_ctrl <- lmer(Dist ~ VTA_Act + delay + (1|subj),data = tmp_dat, REML=F)
summary(q.hpc.dist_vta_delay_ctrl)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dist ~ VTA_Act + delay + (1 | subj)
##    Data: tmp_dat
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1747.9  -1717.5    878.9  -1757.9     3183 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4625 -0.7261 -0.0768  0.6474  3.6276 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  subj     (Intercept) 0.003919 0.0626  
##  Residual             0.033046 0.1818  
## Number of obs: 3188, groups:  subj, 23
## 
## Fixed effects:
##               Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)  8.420e-01  1.384e-02  2.582e+01   60.83   <2e-16 ***
## VTA_Act     -5.856e-02  3.577e-03  3.176e+03  -16.37   <2e-16 ***
## delay13     -1.488e-02  6.442e-03  3.165e+03   -2.31    0.021 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##         (Intr) VTA_Ac
## VTA_Act -0.043       
## delay13 -0.234  0.024

VTA activation remained a significant predictor of convergence after controlling for delay.

Performance across trials - Vigilance

wroi <- c("Q_HPC_p2c")
tmp_dat <- dist_raw[c("subj","run","trial",wroi)]
colnames(tmp_dat)[4] <- "Dist"
# Ordered factor for run; note R applies polynomial contrasts to ordered
# factors by default in lmer
tmp_dat$run_ord <-ordered(tmp_dat$run,levels=1:6,labels=c("R1","R2","R3","R4","R5","R6"))

# Vigilance check: does convergence drift across runs and/or trials?
cvg_across_time <- lmer(Dist ~ run_ord * trial + (1|subj),data = tmp_dat, REML=F)
anova(cvg_across_time)
## Type III Analysis of Variance Table with Satterthwaite's method
##                 Sum Sq  Mean Sq NumDF  DenDF F value Pr(>F)
## run_ord       0.051799 0.010360     5 3165.0  0.2912 0.9180
## trial         0.052407 0.052407     1 3165.1  1.4732 0.2249
## run_ord:trial 0.210432 0.042086     5 3165.1  1.1830 0.3148

There was no significant main effect of Run or Trial number, nor a significant interaction, on hippocampal convergence.